diff --git a/modules/nvidia.nix b/modules/nvidia.nix index d13ce63..b137520 100644 --- a/modules/nvidia.nix +++ b/modules/nvidia.nix @@ -29,7 +29,6 @@ (patch /nvidia/6.13/0001-KBuild-changes.patch) (patch /nvidia/6.13/0002-FROM-AOSC-Use-linux-aperture.c-for-removing-conflict.patch) (patch /nvidia/6.13/0003-FROM-AOSC-TTM-fbdev-emulation-for-Linux-6.13.patch) - (patch /nvidia/6.14/comment-out-date.patch) ]; }).overrideAttrs (finalAttrs': prevAttrs': { # patched builder.sh to not include some egl libraries to prevent apps from blocking nvidia_drm unloading diff --git a/modules/wicked_kernel.nix b/modules/wicked_kernel.nix index ca15673..e99c86f 100644 --- a/modules/wicked_kernel.nix +++ b/modules/wicked_kernel.nix @@ -14,27 +14,27 @@ nixpkgs.overlays = [ (final: prev: { linuxPackages_wicked = final.kernel.linuxPackages_latest.extend (finalAttrs: prevAttrs: { - kernel = prevAttrs.kernel.override (prevAttrs': { + /*kernel = prevAttrs.kernel.override (prevAttrs': { #kernelPatches = builtins.filter (x: !lib.hasPrefix "netfilter-typo-fix" x.name) prevAttrs'.kernelPatches; ignoreConfigErrors = true; argsOverride = let - version = "6.14-git"; + version = "6.13-rc7"; in { inherit version; - modDirVersion = "6.13.0"; + modDirVersion = "6.13.0-rc7"; src = pkgs.fetchgit { url = "https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git"; - rev = "c4b9570cfb63501638db720f3bee9f6dfd044b82"; - hash = "sha256-y9zKzUXb2U7k3yImfq3DYTX3sghQ7+/6xz0M059tyRo="; + rev = "c45323b7560ec87c37c729b703c86ee65f136d75"; + hash = "sha256-t0ZyarZYT+MCLpQ4ObxC4bBcQeqjVyLC0t1GOLn7QDg="; }; /*src = pkgs.fetchzip { url = "https://git.kernel.org/torvalds/t/linux-${version}.tar.gz"; hash = ""; };*/ - }; - }); + /*}; + });*/ xpadneo = prevAttrs.xpadneo.overrideAttrs (finalAttrs': prevAttrs': { src = final.fetchFromGitHub { @@ -70,13 +70,13 @@ ''; in [ - /*{ + { name = "cachyos"; patch = patch /linux/6.13/cachyos.patch; extraConfig = '' AMD_PRIVATE_COLOR y ''; - }*/ + } /*{ name = "HAS_EXECMEM_ROX"; patch = null; @@ -152,7 +152,7 @@ name = "kcore-optimizations"; patch = patch /linux/6.12/kcore-optimizations.patch; }*/ - { + /*{ name = "amd-color-management"; patch = pkgs.fetchpatch { url = "https://github.com/CachyOS/linux/commit/53c3930779ba776a6a4a7ea215fd7a3d225353b3.patch"; @@ -161,7 +161,7 @@ extraConfig = '' AMD_PRIVATE_COLOR y ''; - } + }*/ /*{ name = "faster-suspend-resume"; patch = patch /linux/6.12/PATCH-v1-0-5-Optimize-async-device-suspend-resume.patch; @@ -221,10 +221,10 @@ ARCH_HAS_EXECMEM_ROX y ''; }*/ - /*{ + { name = "BORE"; patch = patch /linux/6.13/0001-linux6.13.y-bore5.9.6.patch; - }*/ + } /*{ name = "mm-unstable"; patch = patch /linux/6.13/mm-unstable.patch; @@ -268,21 +268,13 @@ name = "sched-improvements"; patch = patch /linux/6.13/sched-improvements.patch; }*/ - /*{ - name = "tlb-flush-optimization"; - patch = patch /linux/6.13/tlb-flush-optimization.patch; - }*/ - /*{ - name = "sched-core-2025-01-21"; - patch = patch /linux/6.13/sched-core-2025-01-21.patch; - }*/ { name = "jupiter-mfd"; - patch = patch /linux/6.12/jupiter-mfd.patch; - #patch = null; + #patch = patch /linux/6.12/jupiter-mfd.patch; + patch = null; extraConfig = '' LEDS_STEAMDECK m - EXTCON_STEAMDECK m + #EXTCON_STEAMDECK m MFD_STEAMDECK m SENSORS_STEAMDECK m ''; diff --git a/patches/linux/6.13/cachyos.patch b/patches/linux/6.13/cachyos.patch index 016b00f..2ce88b3 100644 --- a/patches/linux/6.13/cachyos.patch +++ b/patches/linux/6.13/cachyos.patch @@ -1,3 +1,22 @@ +diff --git a/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd 
b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
+new file mode 100644
+index 000000000000..2a19584d091e
+--- /dev/null
++++ b/Documentation/ABI/testing/sysfs-driver-hid-appletb-kbd
+@@ -0,0 +1,13 @@
++What: /sys/bus/hid/drivers/hid-appletb-kbd/<dev>/mode
++Date: September, 2023
++KernelVersion: 6.5
++Contact: linux-input@vger.kernel.org
++Description:
++ The set of keys displayed on the Touch Bar.
++ Valid values are:
++ == =================
++ 0 Escape key only
++ 1 Function keys
++ 2 Media/brightness keys
++ 3 None
++ == =================
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi
index 28144371a0f1..765d50b0d9df 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
@@ -139,10 +158,10 @@ index 28144371a0f1..765d50b0d9df 100644
 * 0 - False,
 * 1 - True
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 73f2b3b0f4a5..94a7cefd6df8 100644
+index 3872bc6ec49d..5e8881ec6b40 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -2277,6 +2277,9 @@
+@@ -2256,6 +2256,9 @@
 disable Do not enable intel_pstate as the default
 scaling driver for the supported processors
+
 active Use intel_pstate driver to bypass the scaling
 governors layer of cpufreq and provides it own
-@@ -4638,6 +4641,15 @@
+@@ -4481,6 +4484,15 @@
 nomsi [MSI] If the PCI_MSI kernel config parameter is
 enabled, this kernel boot option can be used to
 disable the use of MSI interrupts system-wide.
+
 noioapicquirk [APIC] Disable all boot interrupt quirks. Safety
 option to keep boot IRQs enabled. This should
 never be necessary.
+diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
+index f48eaa98d22d..fc777c14cff6 100644
+--- a/Documentation/admin-guide/sysctl/vm.rst
++++ b/Documentation/admin-guide/sysctl/vm.rst
+@@ -25,6 +25,9 @@ files can be found in mm/swap.c.
+ Currently, these files are in /proc/sys/vm:
+ 
+ - admin_reserve_kbytes
++- anon_min_ratio
++- clean_low_ratio
++- clean_min_ratio
+ - compact_memory
+ - compaction_proactiveness
+ - compact_unevictable_allowed
+@@ -108,6 +111,67 @@ On x86_64 this is about 128MB.
+ Changing this takes effect whenever an application requests memory.
+ 
+ 
++anon_min_ratio
++==============
++
++This knob provides *hard* protection of anonymous pages. The anonymous pages
++on the current node won't be reclaimed under any conditions when their amount
++is below vm.anon_min_ratio.
++
++This knob may be used to prevent excessive swap thrashing when anonymous
++memory is low (for example, when memory is going to be overfilled by
++compressed data of zram module).
++
++Setting this value too high (close to 100) can result in inability to
++swap and can lead to early OOM under memory pressure.
++
++The unit of measurement is the percentage of the total memory of the node.
++
++The default value is 15.
++
++
++clean_low_ratio
++================
++
++This knob provides *best-effort* protection of clean file pages. The file pages
++on the current node won't be reclaimed under memory pressure when the amount of
++clean file pages is below vm.clean_low_ratio *unless* we threaten to OOM.
++
++Protection of clean file pages using this knob may be used when swapping is
++still possible to
++ - prevent disk I/O thrashing under memory pressure;
++ - improve performance in disk cache-bound tasks under memory pressure.
++
++Setting it to a high value may result in an early eviction of anonymous pages
++into the swap space by attempting to hold the protected amount of clean file
++pages in memory.
++
++The unit of measurement is the percentage of the total memory of the node.
++
++The default value is 0.
++
++
++clean_min_ratio
++================
++
++This knob provides *hard* protection of clean file pages. The file pages on the
++current node won't be reclaimed under memory pressure when the amount of clean
++file pages is below vm.clean_min_ratio.
++
++Hard protection of clean file pages using this knob may be used to
++ - prevent disk I/O thrashing under memory pressure even with no free swap space;
++ - improve performance in disk cache-bound tasks under memory pressure;
++ - avoid high latency and prevent livelock in near-OOM conditions.
++
++Setting it to a high value may result in an early out-of-memory condition due to
++the inability to reclaim the protected amount of clean file pages when other
++types of pages cannot be reclaimed.
++
++The unit of measurement is the percentage of the total memory of the node.
++
++The default value is 15.
++
++
+ compact_memory
+ ==============
+ 
+@@ -964,6 +1028,14 @@ be 133 (x + 2x = 200, 2x = 133.33).
+ At 0, the kernel will not initiate swap until the amount of free and
+ file-backed pages is less than the high watermark in a zone.
+ 
++This knob has no effect if the amount of clean file pages on the current
++node is below vm.clean_low_ratio or vm.clean_min_ratio. In this case,
++only anonymous pages can be reclaimed.
++
++If the number of anonymous pages on the current node is below
++vm.anon_min_ratio, then only file pages can be reclaimed with
++any vm.swappiness value.
++
+ 
+ unprivileged_userfaultfd
+ ========================
+diff --git a/Documentation/arch/x86/topology.rst b/Documentation/arch/x86/topology.rst
+index 7352ab89a55a..c12837e61bda 100644
+--- a/Documentation/arch/x86/topology.rst
++++ b/Documentation/arch/x86/topology.rst
+@@ -135,6 +135,10 @@ Thread-related topology information in the kernel:
+ The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
+ "core_id."
+ 
++ - topology_logical_core_id();
++
++ The logical core ID to which a thread belongs.
++
+ 
+ 
+ System topology examples
+diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
+index ecccc0473da9..6de6b0e6abf3 100644
+--- a/Documentation/core-api/printk-formats.rst
++++ b/Documentation/core-api/printk-formats.rst
+@@ -648,6 +648,38 @@ Examples::
+ %p4cc Y10 little-endian (0x20303159)
+ %p4cc NV12 big-endian (0xb231564e)
+ 
++Generic FourCC code
++-------------------
++
++::
++ %p4c[hrbl] gP00 (0x67503030)
++
++Print a generic FourCC code, as both ASCII characters and its numerical
++value as hexadecimal.
++
++The additional ``h``, ``r``, ``b``, and ``l`` specifiers are used to specify
++host, reversed, big or little endian order data respectively. Host endian
++order means the data is interpreted as a 32-bit integer and the most
++significant byte is printed first; that is, the character code as printed
++matches the byte order stored in memory on big-endian systems, and is reversed
++on little-endian systems.
++
++Passed by reference.
++ ++Examples for a little-endian machine, given &(u32)0x67503030:: ++ ++ %p4ch gP00 (0x67503030) ++ %p4cl gP00 (0x67503030) ++ %p4cb 00Pg (0x30305067) ++ %p4cr 00Pg (0x30305067) ++ ++Examples for a big-endian machine, given &(u32)0x67503030:: ++ ++ %p4ch gP00 (0x67503030) ++ %p4cl 00Pg (0x30305067) ++ %p4cb gP00 (0x67503030) ++ %p4cr 00Pg (0x30305067) ++ + Rust + ---- + diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 274cc7546efc..9c1b15cd89ab 100644 --- a/Documentation/userspace-api/index.rst @@ -572,10 +746,23 @@ index 000000000000..25e7c4aef968 + ``objs`` and in ``alert``. If this is attempted, the function fails + with ``EINVAL``. diff --git a/MAINTAINERS b/MAINTAINERS -index 58c3381725fb..0d712c89fca5 100644 +index 0fa7c5728f1e..16af42c68cca 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -16750,6 +16750,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git +@@ -7066,6 +7066,12 @@ S: Supported + T: git https://gitlab.freedesktop.org/drm/misc/kernel.git + F: drivers/gpu/drm/sun4i/sun8i* + ++DRM DRIVER FOR APPLE TOUCH BARS ++M: Kerem Karabay ++L: dri-devel@lists.freedesktop.org ++S: Maintained ++F: drivers/gpu/drm/tiny/appletbdrm.c ++ + DRM DRIVER FOR ARM PL111 CLCD + S: Orphan + T: git https://gitlab.freedesktop.org/drm/misc/kernel.git +@@ -16709,6 +16715,15 @@ T: git https://github.com/Paragon-Software-Group/linux-ntfs3.git F: Documentation/filesystems/ntfs3.rst F: fs/ntfs3/ @@ -592,10 +779,10 @@ index 58c3381725fb..0d712c89fca5 100644 M: Finn Thain L: linux-m68k@lists.linux-m68k.org diff --git a/Makefile b/Makefile -index 6a98b8c6a5f5..2828f20f34ed 100644 +index b9464c88ac72..ea555e6a8bf1 100644 --- a/Makefile +++ b/Makefile -@@ -861,11 +861,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks +@@ -860,11 +860,19 @@ KBUILD_CFLAGS += -fno-delete-null-pointer-checks ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE KBUILD_CFLAGS += -O2 KBUILD_RUSTFLAGS += -Copt-level=2 @@ -616,7 +803,7 @@ index 6a98b8c6a5f5..2828f20f34ed 100644 # depends on `opt-level` and `debug-assertions`, respectively. KBUILD_RUSTFLAGS += -Cdebug-assertions=$(if $(CONFIG_RUST_DEBUG_ASSERTIONS),y,n) diff --git a/arch/Kconfig b/arch/Kconfig -index b8a4ff365582..9b087f9bb413 100644 +index 6682b2a53e34..fe54298ae05c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1137,7 +1137,7 @@ config ARCH_MMAP_RND_BITS @@ -770,10 +957,10 @@ index 727f99d333b3..4c43b0d2d09f 100644 +468 common process_ksm_disable sys_process_ksm_disable +469 common process_ksm_status sys_process_ksm_status diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index 27c21c9b6a70..e7e282b30b1d 100644 +index ef6cfea9df73..1f824dcab4dc 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -275,7 +275,7 @@ config X86 +@@ -273,7 +273,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -2137,6 +2324,823 @@ index 5eb708bff1c7..b5fe77405938 100644 # # Due to a historical design error, certain syscalls are numbered differently +diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c +index a8defc813c36..d3bb3865c1b1 100644 +--- a/arch/x86/events/rapl.c ++++ b/arch/x86/events/rapl.c +@@ -39,6 +39,10 @@ + * event: rapl_energy_psys + * perf code: 0x5 + * ++ * core counter: consumption of a single physical core ++ * event: rapl_energy_core (power_core PMU) ++ * perf code: 0x1 ++ * + * We manage those counters as free running (read-only). They may be + * use simultaneously by other tools, such as turbostat. 
+ * +@@ -70,18 +74,22 @@ MODULE_LICENSE("GPL"); + /* + * RAPL energy status counters + */ +-enum perf_rapl_events { ++enum perf_rapl_pkg_events { + PERF_RAPL_PP0 = 0, /* all cores */ + PERF_RAPL_PKG, /* entire package */ + PERF_RAPL_RAM, /* DRAM */ + PERF_RAPL_PP1, /* gpu */ + PERF_RAPL_PSYS, /* psys */ + +- PERF_RAPL_MAX, +- NR_RAPL_DOMAINS = PERF_RAPL_MAX, ++ PERF_RAPL_PKG_EVENTS_MAX, ++ NR_RAPL_PKG_DOMAINS = PERF_RAPL_PKG_EVENTS_MAX, + }; + +-static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { ++#define PERF_RAPL_CORE 0 /* single core */ ++#define PERF_RAPL_CORE_EVENTS_MAX 1 ++#define NR_RAPL_CORE_DOMAINS PERF_RAPL_CORE_EVENTS_MAX ++ ++static const char *const rapl_pkg_domain_names[NR_RAPL_PKG_DOMAINS] __initconst = { + "pp0-core", + "package", + "dram", +@@ -89,6 +97,8 @@ static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = { + "psys", + }; + ++static const char *const rapl_core_domain_name __initconst = "core"; ++ + /* + * event code: LSB 8 bits, passed in attr->config + * any other bit is reserved +@@ -112,7 +122,7 @@ static struct perf_pmu_events_attr event_attr_##v = { \ + * considered as either pkg-scope or die-scope, and we are considering + * them as die-scope. + */ +-#define rapl_pmu_is_pkg_scope() \ ++#define rapl_pkg_pmu_is_pkg_scope() \ + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \ + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + +@@ -129,7 +139,8 @@ struct rapl_pmu { + struct rapl_pmus { + struct pmu pmu; + unsigned int nr_rapl_pmu; +- struct rapl_pmu *pmus[] __counted_by(nr_rapl_pmu); ++ unsigned int cntr_mask; ++ struct rapl_pmu *rapl_pmu[] __counted_by(nr_rapl_pmu); + }; + + enum rapl_unit_quirk { +@@ -139,44 +150,43 @@ enum rapl_unit_quirk { + }; + + struct rapl_model { +- struct perf_msr *rapl_msrs; +- unsigned long events; ++ struct perf_msr *rapl_pkg_msrs; ++ struct perf_msr *rapl_core_msrs; ++ unsigned long pkg_events; ++ unsigned long core_events; + unsigned int msr_power_unit; + enum rapl_unit_quirk unit_quirk; + }; + + /* 1/2^hw_unit Joule */ +-static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; +-static struct rapl_pmus *rapl_pmus; +-static unsigned int rapl_cntr_mask; ++static int rapl_pkg_hw_unit[NR_RAPL_PKG_DOMAINS] __read_mostly; ++static int rapl_core_hw_unit __read_mostly; ++static struct rapl_pmus *rapl_pmus_pkg; ++static struct rapl_pmus *rapl_pmus_core; + static u64 rapl_timer_ms; +-static struct perf_msr *rapl_msrs; ++static struct rapl_model *rapl_model; + + /* +- * Helper functions to get the correct topology macros according to the ++ * Helper function to get the correct topology id according to the + * RAPL PMU scope. + */ +-static inline unsigned int get_rapl_pmu_idx(int cpu) +-{ +- return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) : +- topology_logical_die_id(cpu); +-} +- +-static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu) +-{ +- return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) : +- topology_die_cpumask(cpu); +-} +- +-static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu) ++static inline unsigned int get_rapl_pmu_idx(int cpu, int scope) + { +- unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu); +- + /* +- * The unsigned check also catches the '-1' return value for non +- * existent mappings in the topology map. ++ * Returns unsigned int, which converts the '-1' return value ++ * (for non-existent mappings in topology map) to UINT_MAX, so ++ * the error check in the caller is simplified. + */ +- return rapl_pmu_idx < rapl_pmus->nr_rapl_pmu ? 
rapl_pmus->pmus[rapl_pmu_idx] : NULL; ++ switch (scope) { ++ case PERF_PMU_SCOPE_PKG: ++ return topology_logical_package_id(cpu); ++ case PERF_PMU_SCOPE_DIE: ++ return topology_logical_die_id(cpu); ++ case PERF_PMU_SCOPE_CORE: ++ return topology_logical_core_id(cpu); ++ default: ++ return -EINVAL; ++ } + } + + static inline u64 rapl_read_counter(struct perf_event *event) +@@ -186,19 +196,20 @@ static inline u64 rapl_read_counter(struct perf_event *event) + return raw; + } + +-static inline u64 rapl_scale(u64 v, int cfg) ++static inline u64 rapl_scale(u64 v, struct perf_event *event) + { +- if (cfg > NR_RAPL_DOMAINS) { +- pr_warn("Invalid domain %d, failed to scale data\n", cfg); +- return v; +- } ++ int hw_unit = rapl_pkg_hw_unit[event->hw.config - 1]; ++ ++ if (event->pmu->scope == PERF_PMU_SCOPE_CORE) ++ hw_unit = rapl_core_hw_unit; ++ + /* + * scale delta to smallest unit (1/2^32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). + * Watts = Joules/Time delta + */ +- return v << (32 - rapl_hw_unit[cfg - 1]); ++ return v << (32 - hw_unit); + } + + static u64 rapl_event_update(struct perf_event *event) +@@ -225,7 +236,7 @@ static u64 rapl_event_update(struct perf_event *event) + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + +- sdelta = rapl_scale(delta, event->hw.config); ++ sdelta = rapl_scale(delta, event); + + local64_add(sdelta, &event->count); + +@@ -240,34 +251,34 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu) + + static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) + { +- struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer); ++ struct rapl_pmu *rapl_pmu = container_of(hrtimer, struct rapl_pmu, hrtimer); + struct perf_event *event; + unsigned long flags; + +- if (!pmu->n_active) ++ if (!rapl_pmu->n_active) + return HRTIMER_NORESTART; + +- raw_spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&rapl_pmu->lock, flags); + +- list_for_each_entry(event, &pmu->active_list, active_entry) ++ list_for_each_entry(event, &rapl_pmu->active_list, active_entry) + rapl_event_update(event); + +- raw_spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags); + +- hrtimer_forward_now(hrtimer, pmu->timer_interval); ++ hrtimer_forward_now(hrtimer, rapl_pmu->timer_interval); + + return HRTIMER_RESTART; + } + +-static void rapl_hrtimer_init(struct rapl_pmu *pmu) ++static void rapl_hrtimer_init(struct rapl_pmu *rapl_pmu) + { +- struct hrtimer *hr = &pmu->hrtimer; ++ struct hrtimer *hr = &rapl_pmu->hrtimer; + + hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hr->function = rapl_hrtimer_handle; + } + +-static void __rapl_pmu_event_start(struct rapl_pmu *pmu, ++static void __rapl_pmu_event_start(struct rapl_pmu *rapl_pmu, + struct perf_event *event) + { + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) +@@ -275,39 +286,39 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu, + + event->hw.state = 0; + +- list_add_tail(&event->active_entry, &pmu->active_list); ++ list_add_tail(&event->active_entry, &rapl_pmu->active_list); + + local64_set(&event->hw.prev_count, rapl_read_counter(event)); + +- pmu->n_active++; +- if (pmu->n_active == 1) +- rapl_start_hrtimer(pmu); ++ rapl_pmu->n_active++; ++ if (rapl_pmu->n_active == 1) ++ rapl_start_hrtimer(rapl_pmu); + } + + static void rapl_pmu_event_start(struct perf_event *event, int mode) + { +- struct rapl_pmu *pmu = event->pmu_private; ++ struct rapl_pmu *rapl_pmu = 
event->pmu_private; + unsigned long flags; + +- raw_spin_lock_irqsave(&pmu->lock, flags); +- __rapl_pmu_event_start(pmu, event); +- raw_spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&rapl_pmu->lock, flags); ++ __rapl_pmu_event_start(rapl_pmu, event); ++ raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags); + } + + static void rapl_pmu_event_stop(struct perf_event *event, int mode) + { +- struct rapl_pmu *pmu = event->pmu_private; ++ struct rapl_pmu *rapl_pmu = event->pmu_private; + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + +- raw_spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&rapl_pmu->lock, flags); + + /* mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { +- WARN_ON_ONCE(pmu->n_active <= 0); +- pmu->n_active--; +- if (pmu->n_active == 0) +- hrtimer_cancel(&pmu->hrtimer); ++ WARN_ON_ONCE(rapl_pmu->n_active <= 0); ++ rapl_pmu->n_active--; ++ if (rapl_pmu->n_active == 0) ++ hrtimer_cancel(&rapl_pmu->hrtimer); + + list_del(&event->active_entry); + +@@ -325,23 +336,23 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode) + hwc->state |= PERF_HES_UPTODATE; + } + +- raw_spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags); + } + + static int rapl_pmu_event_add(struct perf_event *event, int mode) + { +- struct rapl_pmu *pmu = event->pmu_private; ++ struct rapl_pmu *rapl_pmu = event->pmu_private; + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + +- raw_spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&rapl_pmu->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) +- __rapl_pmu_event_start(pmu, event); ++ __rapl_pmu_event_start(rapl_pmu, event); + +- raw_spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&rapl_pmu->lock, flags); + + return 0; + } +@@ -354,12 +365,14 @@ static void rapl_pmu_event_del(struct perf_event *event, int flags) + static int rapl_pmu_event_init(struct perf_event *event) + { + u64 cfg = event->attr.config & RAPL_EVENT_MASK; +- int bit, ret = 0; +- struct rapl_pmu *pmu; ++ int bit, rapl_pmus_scope, ret = 0; ++ struct rapl_pmu *rapl_pmu; ++ unsigned int rapl_pmu_idx; ++ struct rapl_pmus *rapl_pmus; + +- /* only look at RAPL events */ +- if (event->attr.type != rapl_pmus->pmu.type) +- return -ENOENT; ++ /* unsupported modes and filters */ ++ if (event->attr.sample_period) /* no sampling */ ++ return -EINVAL; + + /* check only supported bits are set */ + if (event->attr.config & ~RAPL_EVENT_MASK) +@@ -368,26 +381,49 @@ static int rapl_pmu_event_init(struct perf_event *event) + if (event->cpu < 0) + return -EINVAL; + +- if (!cfg || cfg >= NR_RAPL_DOMAINS + 1) ++ rapl_pmus = container_of(event->pmu, struct rapl_pmus, pmu); ++ if (!rapl_pmus) ++ return -EINVAL; ++ rapl_pmus_scope = rapl_pmus->pmu.scope; ++ ++ if (rapl_pmus_scope == PERF_PMU_SCOPE_PKG || rapl_pmus_scope == PERF_PMU_SCOPE_DIE) { ++ /* only look at RAPL package events */ ++ if (event->attr.type != rapl_pmus_pkg->pmu.type) ++ return -ENOENT; ++ ++ cfg = array_index_nospec((long)cfg, NR_RAPL_PKG_DOMAINS + 1); ++ if (!cfg || cfg >= NR_RAPL_PKG_DOMAINS + 1) ++ return -EINVAL; ++ ++ bit = cfg - 1; ++ event->hw.event_base = rapl_model->rapl_pkg_msrs[bit].msr; ++ } else if (rapl_pmus_scope == PERF_PMU_SCOPE_CORE) { ++ /* only look at RAPL core events */ ++ if (event->attr.type != rapl_pmus_core->pmu.type) ++ return -ENOENT; ++ ++ cfg = array_index_nospec((long)cfg, 
NR_RAPL_CORE_DOMAINS + 1); ++ if (!cfg || cfg >= NR_RAPL_PKG_DOMAINS + 1) ++ return -EINVAL; ++ ++ bit = cfg - 1; ++ event->hw.event_base = rapl_model->rapl_core_msrs[bit].msr; ++ } else + return -EINVAL; +- +- cfg = array_index_nospec((long)cfg, NR_RAPL_DOMAINS + 1); +- bit = cfg - 1; + + /* check event supported */ +- if (!(rapl_cntr_mask & (1 << bit))) ++ if (!(rapl_pmus->cntr_mask & (1 << bit))) + return -EINVAL; + +- /* unsupported modes and filters */ +- if (event->attr.sample_period) /* no sampling */ ++ rapl_pmu_idx = get_rapl_pmu_idx(event->cpu, rapl_pmus_scope); ++ if (rapl_pmu_idx >= rapl_pmus->nr_rapl_pmu) + return -EINVAL; +- + /* must be done before validate_group */ +- pmu = cpu_to_rapl_pmu(event->cpu); +- if (!pmu) ++ rapl_pmu = rapl_pmus->rapl_pmu[rapl_pmu_idx]; ++ if (!rapl_pmu) + return -EINVAL; +- event->pmu_private = pmu; +- event->hw.event_base = rapl_msrs[bit].msr; ++ ++ event->pmu_private = rapl_pmu; + event->hw.config = cfg; + event->hw.idx = bit; + +@@ -404,12 +440,14 @@ RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); + RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); + RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); + RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05"); ++RAPL_EVENT_ATTR_STR(energy-core, rapl_core, "event=0x01"); + + RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); + RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); + RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); + RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); + RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_psys_unit, "Joules"); ++RAPL_EVENT_ATTR_STR(energy-core.unit, rapl_core_unit, "Joules"); + + /* + * we compute in 0.23 nJ increments regardless of MSR +@@ -419,6 +457,7 @@ RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890 + RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); + RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); + RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_psys_scale, "2.3283064365386962890625e-10"); ++RAPL_EVENT_ATTR_STR(energy-core.scale, rapl_core_scale, "2.3283064365386962890625e-10"); + + /* + * There are no default events, but we need to create +@@ -451,6 +490,12 @@ static const struct attribute_group *rapl_attr_groups[] = { + NULL, + }; + ++static const struct attribute_group *rapl_core_attr_groups[] = { ++ &rapl_pmu_format_group, ++ &rapl_pmu_events_group, ++ NULL, ++}; ++ + static struct attribute *rapl_events_cores[] = { + EVENT_PTR(rapl_cores), + EVENT_PTR(rapl_cores_unit), +@@ -511,6 +556,18 @@ static struct attribute_group rapl_events_psys_group = { + .attrs = rapl_events_psys, + }; + ++static struct attribute *rapl_events_core[] = { ++ EVENT_PTR(rapl_core), ++ EVENT_PTR(rapl_core_unit), ++ EVENT_PTR(rapl_core_scale), ++ NULL, ++}; ++ ++static struct attribute_group rapl_events_core_group = { ++ .name = "events", ++ .attrs = rapl_events_core, ++}; ++ + static bool test_msr(int idx, void *data) + { + return test_bit(idx, (unsigned long *) data); +@@ -536,11 +593,11 @@ static struct perf_msr intel_rapl_spr_msrs[] = { + }; + + /* +- * Force to PERF_RAPL_MAX size due to: +- * - perf_msr_probe(PERF_RAPL_MAX) ++ * Force to PERF_RAPL_PKG_EVENTS_MAX size due to: ++ * - perf_msr_probe(PERF_RAPL_PKG_EVENTS_MAX) + * - want to use same event codes across both architectures + */ +-static struct perf_msr amd_rapl_msrs[] = { ++static struct perf_msr amd_rapl_pkg_msrs[] = { + 
[PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, NULL, false, 0 }, + [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK }, + [PERF_RAPL_RAM] = { 0, &rapl_events_ram_group, NULL, false, 0 }, +@@ -548,18 +605,25 @@ static struct perf_msr amd_rapl_msrs[] = { + [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, NULL, false, 0 }, + }; + +-static int rapl_check_hw_unit(struct rapl_model *rm) ++static struct perf_msr amd_rapl_core_msrs[] = { ++ [PERF_RAPL_CORE] = { MSR_AMD_CORE_ENERGY_STATUS, &rapl_events_core_group, ++ test_msr, false, RAPL_MSR_MASK }, ++}; ++ ++static int rapl_check_hw_unit(void) + { + u64 msr_rapl_power_unit_bits; + int i; + + /* protect rdmsrl() to handle virtualization */ +- if (rdmsrl_safe(rm->msr_power_unit, &msr_rapl_power_unit_bits)) ++ if (rdmsrl_safe(rapl_model->msr_power_unit, &msr_rapl_power_unit_bits)) + return -1; +- for (i = 0; i < NR_RAPL_DOMAINS; i++) +- rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; ++ for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++) ++ rapl_pkg_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; + +- switch (rm->unit_quirk) { ++ rapl_core_hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL; ++ ++ switch (rapl_model->unit_quirk) { + /* + * DRAM domain on HSW server and KNL has fixed energy unit which can be + * different than the unit from power unit MSR. See +@@ -567,17 +631,16 @@ static int rapl_check_hw_unit(struct rapl_model *rm) + * of 2. Datasheet, September 2014, Reference Number: 330784-001 " + */ + case RAPL_UNIT_QUIRK_INTEL_HSW: +- rapl_hw_unit[PERF_RAPL_RAM] = 16; ++ rapl_pkg_hw_unit[PERF_RAPL_RAM] = 16; + break; + /* SPR uses a fixed energy unit for Psys domain. */ + case RAPL_UNIT_QUIRK_INTEL_SPR: +- rapl_hw_unit[PERF_RAPL_PSYS] = 0; ++ rapl_pkg_hw_unit[PERF_RAPL_PSYS] = 0; + break; + default: + break; + } + +- + /* + * Calculate the timer rate: + * Use reference of 200W for scaling the timeout to avoid counter +@@ -586,9 +649,9 @@ static int rapl_check_hw_unit(struct rapl_model *rm) + * if hw unit is 32, then we use 2 ms 1/200/2 + */ + rapl_timer_ms = 2; +- if (rapl_hw_unit[0] < 32) { ++ if (rapl_pkg_hw_unit[0] < 32) { + rapl_timer_ms = (1000 / (2 * 100)); +- rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1)); ++ rapl_timer_ms *= (1ULL << (32 - rapl_pkg_hw_unit[0] - 1)); + } + return 0; + } +@@ -596,24 +659,32 @@ static int rapl_check_hw_unit(struct rapl_model *rm) + static void __init rapl_advertise(void) + { + int i; ++ int num_counters = hweight32(rapl_pmus_pkg->cntr_mask); ++ ++ if (rapl_pmus_core) ++ num_counters += hweight32(rapl_pmus_core->cntr_mask); + + pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n", +- hweight32(rapl_cntr_mask), rapl_timer_ms); ++ num_counters, rapl_timer_ms); + +- for (i = 0; i < NR_RAPL_DOMAINS; i++) { +- if (rapl_cntr_mask & (1 << i)) { ++ for (i = 0; i < NR_RAPL_PKG_DOMAINS; i++) { ++ if (rapl_pmus_pkg->cntr_mask & (1 << i)) { + pr_info("hw unit of domain %s 2^-%d Joules\n", +- rapl_domain_names[i], rapl_hw_unit[i]); ++ rapl_pkg_domain_names[i], rapl_pkg_hw_unit[i]); + } + } ++ ++ if (rapl_pmus_core && (rapl_pmus_core->cntr_mask & (1 << PERF_RAPL_CORE))) ++ pr_info("hw unit of domain %s 2^-%d Joules\n", ++ rapl_core_domain_name, rapl_core_hw_unit); + } + +-static void cleanup_rapl_pmus(void) ++static void cleanup_rapl_pmus(struct rapl_pmus *rapl_pmus) + { + int i; + + for (i = 0; i < rapl_pmus->nr_rapl_pmu; i++) +- kfree(rapl_pmus->pmus[i]); ++ kfree(rapl_pmus->rapl_pmu[i]); + kfree(rapl_pmus); + } + +@@ 
-626,46 +697,60 @@ static const struct attribute_group *rapl_attr_update[] = { + NULL, + }; + +-static int __init init_rapl_pmu(void) ++static const struct attribute_group *rapl_core_attr_update[] = { ++ &rapl_events_core_group, ++ NULL, ++}; ++ ++static int __init init_rapl_pmu(struct rapl_pmus *rapl_pmus) + { +- struct rapl_pmu *pmu; ++ struct rapl_pmu *rapl_pmu; + int idx; + + for (idx = 0; idx < rapl_pmus->nr_rapl_pmu; idx++) { +- pmu = kzalloc(sizeof(*pmu), GFP_KERNEL); +- if (!pmu) ++ rapl_pmu = kzalloc(sizeof(*rapl_pmu), GFP_KERNEL); ++ if (!rapl_pmu) + goto free; + +- raw_spin_lock_init(&pmu->lock); +- INIT_LIST_HEAD(&pmu->active_list); +- pmu->pmu = &rapl_pmus->pmu; +- pmu->timer_interval = ms_to_ktime(rapl_timer_ms); +- rapl_hrtimer_init(pmu); ++ raw_spin_lock_init(&rapl_pmu->lock); ++ INIT_LIST_HEAD(&rapl_pmu->active_list); ++ rapl_pmu->pmu = &rapl_pmus->pmu; ++ rapl_pmu->timer_interval = ms_to_ktime(rapl_timer_ms); ++ rapl_hrtimer_init(rapl_pmu); + +- rapl_pmus->pmus[idx] = pmu; ++ rapl_pmus->rapl_pmu[idx] = rapl_pmu; + } + + return 0; + free: + for (; idx > 0; idx--) +- kfree(rapl_pmus->pmus[idx - 1]); ++ kfree(rapl_pmus->rapl_pmu[idx - 1]); + return -ENOMEM; + } + +-static int __init init_rapl_pmus(void) ++static int __init init_rapl_pmus(struct rapl_pmus **rapl_pmus_ptr, int rapl_pmu_scope, ++ const struct attribute_group **rapl_attr_groups, ++ const struct attribute_group **rapl_attr_update) + { + int nr_rapl_pmu = topology_max_packages(); +- int rapl_pmu_scope = PERF_PMU_SCOPE_PKG; ++ struct rapl_pmus *rapl_pmus; + +- if (!rapl_pmu_is_pkg_scope()) { +- nr_rapl_pmu *= topology_max_dies_per_package(); +- rapl_pmu_scope = PERF_PMU_SCOPE_DIE; +- } ++ /* ++ * rapl_pmu_scope must be either PKG, DIE or CORE ++ */ ++ if (rapl_pmu_scope == PERF_PMU_SCOPE_DIE) ++ nr_rapl_pmu *= topology_max_dies_per_package(); ++ else if (rapl_pmu_scope == PERF_PMU_SCOPE_CORE) ++ nr_rapl_pmu *= topology_num_cores_per_package(); ++ else if (rapl_pmu_scope != PERF_PMU_SCOPE_PKG) ++ return -EINVAL; + +- rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL); ++ rapl_pmus = kzalloc(struct_size(rapl_pmus, rapl_pmu, nr_rapl_pmu), GFP_KERNEL); + if (!rapl_pmus) + return -ENOMEM; + ++ *rapl_pmus_ptr = rapl_pmus; ++ + rapl_pmus->nr_rapl_pmu = nr_rapl_pmu; + rapl_pmus->pmu.attr_groups = rapl_attr_groups; + rapl_pmus->pmu.attr_update = rapl_attr_update; +@@ -680,75 +765,77 @@ static int __init init_rapl_pmus(void) + rapl_pmus->pmu.module = THIS_MODULE; + rapl_pmus->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE; + +- return init_rapl_pmu(); ++ return init_rapl_pmu(rapl_pmus); + } + + static struct rapl_model model_snb = { +- .events = BIT(PERF_RAPL_PP0) | ++ .pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_PP1), + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_snbep = { +- .events = BIT(PERF_RAPL_PP0) | ++ .pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM), + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_hsw = { +- .events = BIT(PERF_RAPL_PP0) | ++ .pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM) | + BIT(PERF_RAPL_PP1), + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_hsx = { +- .events = BIT(PERF_RAPL_PP0) | ++ 
.pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM), + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_HSW, + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_knl = { +- .events = BIT(PERF_RAPL_PKG) | ++ .pkg_events = BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM), + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_HSW, + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_skl = { +- .events = BIT(PERF_RAPL_PP0) | ++ .pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM) | + BIT(PERF_RAPL_PP1) | + BIT(PERF_RAPL_PSYS), + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_msrs, ++ .rapl_pkg_msrs = intel_rapl_msrs, + }; + + static struct rapl_model model_spr = { +- .events = BIT(PERF_RAPL_PP0) | ++ .pkg_events = BIT(PERF_RAPL_PP0) | + BIT(PERF_RAPL_PKG) | + BIT(PERF_RAPL_RAM) | + BIT(PERF_RAPL_PSYS), + .unit_quirk = RAPL_UNIT_QUIRK_INTEL_SPR, + .msr_power_unit = MSR_RAPL_POWER_UNIT, +- .rapl_msrs = intel_rapl_spr_msrs, ++ .rapl_pkg_msrs = intel_rapl_spr_msrs, + }; + + static struct rapl_model model_amd_hygon = { +- .events = BIT(PERF_RAPL_PKG), ++ .pkg_events = BIT(PERF_RAPL_PKG), ++ .core_events = BIT(PERF_RAPL_CORE), + .msr_power_unit = MSR_AMD_RAPL_POWER_UNIT, +- .rapl_msrs = amd_rapl_msrs, ++ .rapl_pkg_msrs = amd_rapl_pkg_msrs, ++ .rapl_core_msrs = amd_rapl_core_msrs, + }; + + static const struct x86_cpu_id rapl_model_match[] __initconst = { +@@ -804,45 +891,73 @@ MODULE_DEVICE_TABLE(x86cpu, rapl_model_match); + static int __init rapl_pmu_init(void) + { + const struct x86_cpu_id *id; +- struct rapl_model *rm; ++ int rapl_pkg_pmu_scope = PERF_PMU_SCOPE_DIE; + int ret; + ++ if (rapl_pkg_pmu_is_pkg_scope()) ++ rapl_pkg_pmu_scope = PERF_PMU_SCOPE_PKG; ++ + id = x86_match_cpu(rapl_model_match); + if (!id) + return -ENODEV; + +- rm = (struct rapl_model *) id->driver_data; +- +- rapl_msrs = rm->rapl_msrs; ++ rapl_model = (struct rapl_model *) id->driver_data; + +- rapl_cntr_mask = perf_msr_probe(rapl_msrs, PERF_RAPL_MAX, +- false, (void *) &rm->events); +- +- ret = rapl_check_hw_unit(rm); ++ ret = rapl_check_hw_unit(); + if (ret) + return ret; + +- ret = init_rapl_pmus(); ++ ret = init_rapl_pmus(&rapl_pmus_pkg, rapl_pkg_pmu_scope, rapl_attr_groups, ++ rapl_attr_update); + if (ret) + return ret; + +- ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); ++ rapl_pmus_pkg->cntr_mask = perf_msr_probe(rapl_model->rapl_pkg_msrs, ++ PERF_RAPL_PKG_EVENTS_MAX, false, ++ (void *) &rapl_model->pkg_events); ++ ++ ret = perf_pmu_register(&rapl_pmus_pkg->pmu, "power", -1); + if (ret) + goto out; + ++ if (rapl_model->core_events) { ++ ret = init_rapl_pmus(&rapl_pmus_core, PERF_PMU_SCOPE_CORE, ++ rapl_core_attr_groups, ++ rapl_core_attr_update); ++ if (ret) { ++ pr_warn("power-core PMU initialization failed (%d)\n", ret); ++ goto core_init_failed; ++ } ++ ++ rapl_pmus_core->cntr_mask = perf_msr_probe(rapl_model->rapl_core_msrs, ++ PERF_RAPL_CORE_EVENTS_MAX, false, ++ (void *) &rapl_model->core_events); ++ ++ ret = perf_pmu_register(&rapl_pmus_core->pmu, "power_core", -1); ++ if (ret) { ++ pr_warn("power-core PMU registration failed (%d)\n", ret); ++ cleanup_rapl_pmus(rapl_pmus_core); ++ } ++ } ++ ++core_init_failed: + rapl_advertise(); + return 0; + + out: + pr_warn("Initialization failed (%d), disabled\n", ret); +- cleanup_rapl_pmus(); ++ cleanup_rapl_pmus(rapl_pmus_pkg); + return 
ret; + } + module_init(rapl_pmu_init); + + static void __exit intel_rapl_exit(void) + { +- perf_pmu_unregister(&rapl_pmus->pmu); +- cleanup_rapl_pmus(); ++ if (rapl_pmus_core) { ++ perf_pmu_unregister(&rapl_pmus_core->pmu); ++ cleanup_rapl_pmus(rapl_pmus_core); ++ } ++ perf_pmu_unregister(&rapl_pmus_pkg->pmu); ++ cleanup_rapl_pmus(rapl_pmus_pkg); + } + module_exit(intel_rapl_exit); diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 1cc113200ff5..cbe6c71e17c1 100644 --- a/arch/x86/hyperv/mmu.c @@ -2148,7 +3152,7 @@ index 1cc113200ff5..cbe6c71e17c1 100644 - pv_ops.mmu.tlb_remove_table = tlb_remove_table; } diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h -index 508c0dad116b..b5c66b7465ba 100644 +index 645aa360628d..989e4c9cad2e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -338,6 +338,7 @@ @@ -2325,7 +3329,7 @@ index 2886cb668d7f..65f50464b5c3 100644 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next, diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h -index 9a71880eec07..a7ea9720ba3c 100644 +index 3ae84c3b8e6d..dc1c1057f26e 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -25,6 +25,7 @@ @@ -2345,7 +3349,7 @@ index 9a71880eec07..a7ea9720ba3c 100644 /* diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h -index 041aff51eb50..38a632a282d4 100644 +index d4eb9e1d61b8..794ba3647c6c 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -91,11 +91,6 @@ static inline void __flush_tlb_multi(const struct cpumask *cpumask, @@ -2361,10 +3365,10 @@ index 041aff51eb50..38a632a282d4 100644 { PVOP_VCALL1(mmu.exit_mmap, mm); diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h -index fea56b04f436..e26633c00455 100644 +index 8d4fbe1be489..13405959e4db 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h -@@ -134,8 +134,6 @@ struct pv_mmu_ops { +@@ -136,8 +136,6 @@ struct pv_mmu_ops { void (*flush_tlb_multi)(const struct cpumask *cpus, const struct flush_tlb_info *info); @@ -2397,6 +3401,18 @@ index b3ab80a03365..5e883b397ff3 100644 /* Can be used to override the logic in pci_scan_bus for skipping already-configured bus numbers - to be used for buggy BIOSes or architectures with incomplete PCI setup by the loader */ +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h +index 20e6009381ed..c0cd10182e90 100644 +--- a/arch/x86/include/asm/processor.h ++++ b/arch/x86/include/asm/processor.h +@@ -98,6 +98,7 @@ struct cpuinfo_topology { + // Logical ID mappings + u32 logical_pkg_id; + u32 logical_die_id; ++ u32 logical_core_id; + + // AMD Node ID and Nodes per Package info + u32 amd_node_id; diff --git a/arch/x86/include/asm/tlbbatch.h b/arch/x86/include/asm/tlbbatch.h index 1ad56eb3e8a8..f9a17edf63ad 100644 --- a/arch/x86/include/asm/tlbbatch.h @@ -2539,6 +3555,36 @@ index 69e79fff41b8..5490ca71e27f 100644 static inline bool pte_flags_need_flush(unsigned long oldflags, unsigned long newflags, +diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h +index fd41103ad342..ec134b719144 100644 +--- a/arch/x86/include/asm/topology.h ++++ b/arch/x86/include/asm/topology.h +@@ -143,6 +143,7 @@ extern const struct cpumask *cpu_clustergroup_mask(int cpu); + #define topology_logical_package_id(cpu) (cpu_data(cpu).topo.logical_pkg_id) + #define 
topology_physical_package_id(cpu) (cpu_data(cpu).topo.pkg_id) + #define topology_logical_die_id(cpu) (cpu_data(cpu).topo.logical_die_id) ++#define topology_logical_core_id(cpu) (cpu_data(cpu).topo.logical_core_id) + #define topology_die_id(cpu) (cpu_data(cpu).topo.die_id) + #define topology_core_id(cpu) (cpu_data(cpu).topo.core_id) + #define topology_ppin(cpu) (cpu_data(cpu).ppin) +@@ -250,7 +251,7 @@ extern bool x86_topology_update; + #include + + DECLARE_PER_CPU_READ_MOSTLY(int, sched_core_priority); +-extern unsigned int __read_mostly sysctl_sched_itmt_enabled; ++extern bool __read_mostly sysctl_sched_itmt_enabled; + + /* Interface to set priority of a cpu */ + void sched_set_itmt_core_prio(int prio, int core_cpu); +@@ -263,7 +264,7 @@ void sched_clear_itmt_support(void); + + #else /* CONFIG_SCHED_MC_PRIO */ + +-#define sysctl_sched_itmt_enabled 0 ++#define sysctl_sched_itmt_enabled false + static inline void sched_set_itmt_core_prio(int prio, int core_cpu) + { + } diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h index 75884d2cdec3..2fdae271f47f 100644 --- a/arch/x86/include/asm/vermagic.h @@ -2630,7 +3676,7 @@ index 75884d2cdec3..2fdae271f47f 100644 #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index 54194f5995de..38f454671c88 100644 +index 79d2e17f6582..21076252a491 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -29,6 +29,8 @@ @@ -2642,7 +3688,7 @@ index 54194f5995de..38f454671c88 100644 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) { u32 gprs[8] = { 0 }; -@@ -1073,6 +1075,10 @@ static void init_amd(struct cpuinfo_x86 *c) +@@ -1069,6 +1071,10 @@ static void init_amd(struct cpuinfo_x86 *c) /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. 
*/ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); @@ -2653,7 +3699,7 @@ index 54194f5995de..38f454671c88 100644 } #ifdef CONFIG_X86_32 -@@ -1139,6 +1145,12 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) +@@ -1135,6 +1141,12 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) tlb_lli_2m[ENTRIES] = eax & mask; tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; @@ -2666,8 +3712,180 @@ index 54194f5995de..38f454671c88 100644 } static const struct cpu_dev amd_cpu_dev = { +diff --git a/arch/x86/kernel/cpu/debugfs.c b/arch/x86/kernel/cpu/debugfs.c +index 10719aba6276..cacfd3f6abef 100644 +--- a/arch/x86/kernel/cpu/debugfs.c ++++ b/arch/x86/kernel/cpu/debugfs.c +@@ -25,6 +25,7 @@ static int cpu_debug_show(struct seq_file *m, void *p) + seq_printf(m, "cpu_type: %s\n", get_topology_cpu_type_name(c)); + seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id); + seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id); ++ seq_printf(m, "logical_core_id: %u\n", c->topo.logical_core_id); + seq_printf(m, "llc_id: %u\n", c->topo.llc_id); + seq_printf(m, "l2c_id: %u\n", c->topo.l2c_id); + seq_printf(m, "amd_node_id: %u\n", c->topo.amd_node_id); +diff --git a/arch/x86/kernel/cpu/topology_common.c b/arch/x86/kernel/cpu/topology_common.c +index 8277c64f88db..b5a5e1411469 100644 +--- a/arch/x86/kernel/cpu/topology_common.c ++++ b/arch/x86/kernel/cpu/topology_common.c +@@ -185,6 +185,7 @@ static void topo_set_ids(struct topo_scan *tscan, bool early) + if (!early) { + c->topo.logical_pkg_id = topology_get_logical_id(apicid, TOPO_PKG_DOMAIN); + c->topo.logical_die_id = topology_get_logical_id(apicid, TOPO_DIE_DOMAIN); ++ c->topo.logical_core_id = topology_get_logical_id(apicid, TOPO_CORE_DOMAIN); + } + + /* Package relative core ID */ +diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c +index 51b805c727fc..9cea1fc36c18 100644 +--- a/arch/x86/kernel/itmt.c ++++ b/arch/x86/kernel/itmt.c +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -34,49 +35,38 @@ static bool __read_mostly sched_itmt_capable; + * of higher turbo frequency for cpus supporting Intel Turbo Boost Max + * Technology 3.0. 
+ * +- * It can be set via /proc/sys/kernel/sched_itmt_enabled ++ * It can be set via /sys/kernel/debug/x86/sched_itmt_enabled + */ +-unsigned int __read_mostly sysctl_sched_itmt_enabled; ++bool __read_mostly sysctl_sched_itmt_enabled; + +-static int sched_itmt_update_handler(const struct ctl_table *table, int write, +- void *buffer, size_t *lenp, loff_t *ppos) ++static ssize_t sched_itmt_enabled_write(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, loff_t *ppos) + { +- unsigned int old_sysctl; +- int ret; ++ ssize_t result; ++ bool orig; + +- mutex_lock(&itmt_update_mutex); ++ guard(mutex)(&itmt_update_mutex); + +- if (!sched_itmt_capable) { +- mutex_unlock(&itmt_update_mutex); +- return -EINVAL; +- } +- +- old_sysctl = sysctl_sched_itmt_enabled; +- ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); ++ orig = sysctl_sched_itmt_enabled; ++ result = debugfs_write_file_bool(filp, ubuf, cnt, ppos); + +- if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) { ++ if (sysctl_sched_itmt_enabled != orig) { + x86_topology_update = true; + rebuild_sched_domains(); + } + +- mutex_unlock(&itmt_update_mutex); +- +- return ret; ++ return result; + } + +-static struct ctl_table itmt_kern_table[] = { +- { +- .procname = "sched_itmt_enabled", +- .data = &sysctl_sched_itmt_enabled, +- .maxlen = sizeof(unsigned int), +- .mode = 0644, +- .proc_handler = sched_itmt_update_handler, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, ++static const struct file_operations dfs_sched_itmt_fops = { ++ .read = debugfs_read_file_bool, ++ .write = sched_itmt_enabled_write, ++ .open = simple_open, ++ .llseek = default_llseek, + }; + +-static struct ctl_table_header *itmt_sysctl_header; ++static struct dentry *dfs_sched_itmt; + + /** + * sched_set_itmt_support() - Indicate platform supports ITMT +@@ -97,16 +87,18 @@ static struct ctl_table_header *itmt_sysctl_header; + */ + int sched_set_itmt_support(void) + { +- mutex_lock(&itmt_update_mutex); ++ guard(mutex)(&itmt_update_mutex); + +- if (sched_itmt_capable) { +- mutex_unlock(&itmt_update_mutex); ++ if (sched_itmt_capable) + return 0; +- } + +- itmt_sysctl_header = register_sysctl("kernel", itmt_kern_table); +- if (!itmt_sysctl_header) { +- mutex_unlock(&itmt_update_mutex); ++ dfs_sched_itmt = debugfs_create_file_unsafe("sched_itmt_enabled", ++ 0644, ++ arch_debugfs_dir, ++ &sysctl_sched_itmt_enabled, ++ &dfs_sched_itmt_fops); ++ if (IS_ERR_OR_NULL(dfs_sched_itmt)) { ++ dfs_sched_itmt = NULL; + return -ENOMEM; + } + +@@ -117,8 +109,6 @@ int sched_set_itmt_support(void) + x86_topology_update = true; + rebuild_sched_domains(); + +- mutex_unlock(&itmt_update_mutex); +- + return 0; + } + +@@ -134,18 +124,15 @@ int sched_set_itmt_support(void) + */ + void sched_clear_itmt_support(void) + { +- mutex_lock(&itmt_update_mutex); ++ guard(mutex)(&itmt_update_mutex); + +- if (!sched_itmt_capable) { +- mutex_unlock(&itmt_update_mutex); ++ if (!sched_itmt_capable) + return; +- } ++ + sched_itmt_capable = false; + +- if (itmt_sysctl_header) { +- unregister_sysctl_table(itmt_sysctl_header); +- itmt_sysctl_header = NULL; +- } ++ debugfs_remove(dfs_sched_itmt); ++ dfs_sched_itmt = NULL; + + if (sysctl_sched_itmt_enabled) { + /* disable sched_itmt if we are no longer ITMT capable */ +@@ -153,8 +140,6 @@ void sched_clear_itmt_support(void) + x86_topology_update = true; + rebuild_sched_domains(); + } +- +- mutex_unlock(&itmt_update_mutex); + } + + int arch_asym_cpu_priority(int cpu) diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c -index 
7a422a6c5983..3be9b3342c67 100644 +index 21e9e4845354..83b7679658b1 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -838,7 +838,6 @@ static void __init kvm_guest_init(void) @@ -2679,7 +3897,7 @@ index 7a422a6c5983..3be9b3342c67 100644 } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c -index 927e33e6843a..2aa251d0b308 100644 +index fec381533555..c019771e0123 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -59,11 +59,6 @@ void __init native_pv_lock_init(void) @@ -2694,7 +3912,7 @@ index 927e33e6843a..2aa251d0b308 100644 struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; -@@ -185,7 +180,6 @@ struct paravirt_patch_template pv_ops = { +@@ -191,7 +186,6 @@ struct paravirt_patch_template pv_ops = { .mmu.flush_tlb_kernel = native_flush_tlb_global, .mmu.flush_tlb_one_user = native_flush_tlb_one_user, .mmu.flush_tlb_multi = native_flush_tlb_multi, @@ -2702,6 +3920,57 @@ index 927e33e6843a..2aa251d0b308 100644 .mmu.exit_mmap = paravirt_nop, .mmu.notify_page_enc_status_changed = paravirt_nop, +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c +index b5a8f0891135..ef63b1c0b491 100644 +--- a/arch/x86/kernel/smpboot.c ++++ b/arch/x86/kernel/smpboot.c +@@ -482,12 +482,6 @@ static int x86_core_flags(void) + return cpu_core_flags() | x86_sched_itmt_flags(); + } + #endif +-#ifdef CONFIG_SCHED_SMT +-static int x86_smt_flags(void) +-{ +- return cpu_smt_flags(); +-} +-#endif + #ifdef CONFIG_SCHED_CLUSTER + static int x86_cluster_flags(void) + { +@@ -495,15 +489,6 @@ static int x86_cluster_flags(void) + } + #endif + +-static int x86_die_flags(void) +-{ +- if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU) || +- cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES)) +- return x86_sched_itmt_flags(); +- +- return 0; +-} +- + /* + * Set if a package/die has multiple NUMA nodes inside. 
+ * AMD Magny-Cours, Intel Cluster-on-Die, and Intel +@@ -519,7 +504,7 @@ static void __init build_sched_topology(void) + + #ifdef CONFIG_SCHED_SMT + x86_topology[i++] = (struct sched_domain_topology_level){ +- cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) ++ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) + }; + #endif + #ifdef CONFIG_SCHED_CLUSTER +@@ -539,7 +524,7 @@ static void __init build_sched_topology(void) + */ + if (!x86_has_numa_in_package) { + x86_topology[i++] = (struct sched_domain_topology_level){ +- cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG) ++ cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG) + }; + } + diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5745a354a241..3dc4af1f7868 100644 --- a/arch/x86/mm/pgtable.c @@ -3459,10 +4728,10 @@ index 37effc1b134e..5c944f0dcc20 100644 +468 common process_ksm_disable sys_process_ksm_disable +469 common process_ksm_status sys_process_ksm_status diff --git a/block/elevator.c b/block/elevator.c -index b81216c48b6b..8df4c9925f2a 100644 +index 7c3ba80e5ff4..06e974eb6594 100644 --- a/block/elevator.c +++ b/block/elevator.c -@@ -558,9 +558,17 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) +@@ -566,9 +566,17 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) if (q->nr_hw_queues != 1 && !blk_mq_is_shared_tags(q->tag_set->flags)) @@ -4592,7 +5861,7 @@ index b8e2396a708a..d8e529cd454d 100644 default_driver = &intel_pstate; else if (!strcmp(str, "passive")) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h -index 69895fccb474..5884de0f23bb 100644 +index 4653a8d2823a..6590e83dfbf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -160,6 +160,7 @@ struct amdgpu_watchdog_timer { @@ -4692,10 +5961,10 @@ index 0e16432d9a72..867bc5c5ce67 100644 bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 36053b3d48b3..b4517727a6ca 100644 +index cd4fac120834..1ab433d774cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -@@ -4465,8 +4465,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, +@@ -4461,8 +4461,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } /* init i2c buses */ @@ -4705,7 +5974,7 @@ index 36053b3d48b3..b4517727a6ca 100644 } } -@@ -4735,8 +4734,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) +@@ -4724,8 +4723,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) amdgpu_reset_fini(adev); /* free i2c buses */ @@ -4716,7 +5985,7 @@ index 36053b3d48b3..b4517727a6ca 100644 if (amdgpu_emu_mode != 1) amdgpu_atombios_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c -index 492b09d84571..f5fcd626fcae 100644 +index 38686203bea6..ade5a5b597e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -137,6 +137,7 @@ enum AMDGPU_DEBUG_MASK { @@ -4743,6 +6012,16 @@ index 492b09d84571..f5fcd626fcae 100644 /** * DOC: vramlimit (int) * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM). 
+@@ -2246,6 +2256,9 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, + int ret, retry = 0, i; + bool supports_atomic = false; + ++ if (vga_switcheroo_client_probe_defer(pdev)) ++ return -EPROBE_DEFER; ++ + /* skip devices which are owned by radeon */ + for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) { + if (amdgpu_unsupported_pciidlist[i] == pdev->device) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index f0765ccde668..8179d0814db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -4842,7 +6121,7 @@ index a51f3414b65d..bc0f9759c5c5 100644 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", dev->primary->index); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c -index b1818e87889a..f13e5eecd32a 100644 +index ba5160399ab2..ee80e748597e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1339,7 +1339,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) @@ -4855,10 +6134,10 @@ index b1818e87889a..f13e5eecd32a 100644 DRM_INFO("use_doorbell being set to: [%s]\n", diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig -index abd3b6564373..46937e6fa78d 100644 +index 11e3f2f3b174..7b1bd69dc29e 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig -@@ -56,4 +56,10 @@ config DRM_AMD_SECURE_DISPLAY +@@ -54,4 +54,10 @@ config DRM_AMD_SECURE_DISPLAY This option enables the calculation of crc of specific region via debugfs. Cooperate with specific DMCU FW. @@ -4870,10 +6149,10 @@ index abd3b6564373..46937e6fa78d 100644 + endmenu diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -index 0ec178ca7434..c148a18b95e3 100644 +index 5f216d626cbb..d88a8e0858a6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c -@@ -164,6 +164,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); +@@ -162,6 +162,9 @@ MODULE_FIRMWARE(FIRMWARE_DCN_401_DMUB); /* Number of bytes in PSP footer for firmware. */ #define PSP_FOOTER_BYTES 0x100 @@ -4883,7 +6162,7 @@ index 0ec178ca7434..c148a18b95e3 100644 /** * DOC: overview * -@@ -179,6 +182,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev); +@@ -177,6 +180,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev); static void amdgpu_dm_fini(struct amdgpu_device *adev); static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); static void reset_freesync_config_for_crtc(struct dm_crtc_state *new_crtc_state); @@ -4892,7 +6171,7 @@ index 0ec178ca7434..c148a18b95e3 100644 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) { -@@ -2890,6 +2895,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) +@@ -2839,6 +2844,33 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) return 0; } @@ -4926,7 +6205,7 @@ index 0ec178ca7434..c148a18b95e3 100644 /** * dm_hw_init() - Initialize DC device * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
-@@ -2921,6 +2953,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) +@@ -2870,6 +2902,10 @@ static int dm_hw_init(struct amdgpu_ip_block *ip_block) return r; amdgpu_dm_hpd_init(adev); @@ -4937,7 +6216,7 @@ index 0ec178ca7434..c148a18b95e3 100644 return 0; } -@@ -2936,6 +2972,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) +@@ -2885,6 +2921,8 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -4946,7 +6225,7 @@ index 0ec178ca7434..c148a18b95e3 100644 amdgpu_dm_hpd_fini(adev); amdgpu_dm_irq_fini(adev); -@@ -4579,7 +4617,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +@@ -4516,7 +4554,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) return r; } @@ -4955,7 +6234,7 @@ index 0ec178ca7434..c148a18b95e3 100644 if (amdgpu_dm_create_color_properties(adev)) { dc_state_release(state->context); kfree(state); -@@ -4597,7 +4635,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +@@ -4534,7 +4572,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) return 0; } @@ -4964,7 +6243,7 @@ index 0ec178ca7434..c148a18b95e3 100644 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255 #define AMDGPU_DM_MIN_SPREAD ((AMDGPU_DM_DEFAULT_MAX_BACKLIGHT - AMDGPU_DM_DEFAULT_MIN_BACKLIGHT) / 2) #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 -@@ -4631,11 +4669,27 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, +@@ -4568,11 +4606,27 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, if (caps.caps_valid) { dm->backlight_caps[bl_idx].caps_valid = true; @@ -4992,7 +6271,7 @@ index 0ec178ca7434..c148a18b95e3 100644 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps[bl_idx].max_input_signal = -@@ -4645,6 +4699,9 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, +@@ -4582,6 +4636,9 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, if (dm->backlight_caps[bl_idx].aux_support) return; @@ -5002,7 +6281,7 @@ index 0ec178ca7434..c148a18b95e3 100644 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; #endif -@@ -4676,9 +4733,9 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c +@@ -4613,9 +4670,9 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c if (!get_brightness_range(caps, &min, &max)) return brightness; @@ -5015,7 +6294,7 @@ index 0ec178ca7434..c148a18b95e3 100644 } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, -@@ -4691,9 +4748,9 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap +@@ -4628,9 +4685,9 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap if (brightness < min) return 0; @@ -5028,7 +6307,7 @@ index 0ec178ca7434..c148a18b95e3 100644 } static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, -@@ -8279,7 +8336,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, +@@ -8218,7 +8275,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; @@ -5037,7 +6316,7 @@ index 0ec178ca7434..c148a18b95e3 100644 return result; cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); -@@ -8298,11 +8355,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, +@@ 
-8237,11 +8294,18 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, cmd.payloads[i].data = msgs[i].buf; } @@ -5061,7 +6340,7 @@ index 0ec178ca7434..c148a18b95e3 100644 kfree(cmd.payloads); return result; -@@ -8319,9 +8383,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { +@@ -8258,9 +8322,7 @@ static const struct i2c_algorithm amdgpu_dm_i2c_algo = { }; static struct amdgpu_i2c_adapter * @@ -5072,7 +6351,7 @@ index 0ec178ca7434..c148a18b95e3 100644 { struct amdgpu_device *adev = ddc_service->ctx->driver_context; struct amdgpu_i2c_adapter *i2c; -@@ -8332,9 +8394,14 @@ create_i2c(struct ddc_service *ddc_service, +@@ -8271,9 +8333,14 @@ create_i2c(struct ddc_service *ddc_service, i2c->base.owner = THIS_MODULE; i2c->base.dev.parent = &adev->pdev->dev; i2c->base.algo = &amdgpu_dm_i2c_algo; @@ -5088,7 +6367,7 @@ index 0ec178ca7434..c148a18b95e3 100644 return i2c; } -@@ -8380,7 +8447,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, +@@ -8298,7 +8365,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, link->priv = aconnector; @@ -5097,7 +6376,7 @@ index 0ec178ca7434..c148a18b95e3 100644 if (!i2c) { DRM_ERROR("Failed to create i2c adapter data\n"); return -ENOMEM; -@@ -9045,7 +9112,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, +@@ -8959,7 +9026,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, int planes_count = 0, vpos, hpos; unsigned long flags; u32 target_vblank, last_flip_vblank; @@ -5107,7 +6386,7 @@ index 0ec178ca7434..c148a18b95e3 100644 bool pflip_present = false; bool dirty_rects_changed = false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h -index d2703ca7dff3..ef60e80de19c 100644 +index 2227cd8e4a89..5710776bb0e2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -606,6 +606,13 @@ struct amdgpu_display_manager { @@ -5169,10 +6448,10 @@ index 36a830a7440f..a8fc8bd52d51 100644 #endif return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c -index 774cc3f4f3fd..d5066e407ad5 100644 +index 495e3cd70426..704a48209657 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c -@@ -1595,7 +1595,7 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, +@@ -1573,7 +1573,7 @@ static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane, drm_atomic_helper_plane_destroy_state(plane, state); } @@ -5181,7 +6460,7 @@ index 774cc3f4f3fd..d5066e407ad5 100644 static void dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm, struct drm_plane *plane) -@@ -1786,7 +1786,7 @@ static const struct drm_plane_funcs dm_plane_funcs = { +@@ -1764,7 +1764,7 @@ static const struct drm_plane_funcs dm_plane_funcs = { .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state, .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state, .format_mod_supported = amdgpu_dm_plane_format_mod_supported, @@ -5190,9 +6469,9 @@ index 774cc3f4f3fd..d5066e407ad5 100644 .atomic_set_property = dm_atomic_plane_set_property, .atomic_get_property = dm_atomic_plane_get_property, #endif -@@ -1882,7 +1882,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, - else - drm_plane_helper_add(plane, &dm_plane_helper_funcs); +@@ -1857,7 +1857,7 @@ 
int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + + drm_plane_helper_add(plane, &dm_plane_helper_funcs); -#ifdef AMD_PRIVATE_COLOR +#ifdef CONFIG_AMD_PRIVATE_COLOR @@ -5200,7 +6479,7 @@ index 774cc3f4f3fd..d5066e407ad5 100644 #endif /* Create (reset) the plane state */ diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c -index a62f6c51301c..1d2c6019efac 100644 +index c9a6de110b74..470ec970217b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1778,6 +1778,7 @@ static enum bp_result get_firmware_info_v3_1( @@ -5238,10 +6517,10 @@ index a62f6c51301c..1d2c6019efac 100644 return BP_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c -index c1b79b379447..261c3bc4d46e 100644 +index 457d60eeb486..13636eb4ec3f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c -@@ -150,6 +150,12 @@ bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx) +@@ -142,6 +142,12 @@ bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx) return link->dc->link_srv->update_dsc_config(pipe_ctx); } @@ -5255,10 +6534,10 @@ index c1b79b379447..261c3bc4d46e 100644 struct dc *dc, size_t slave_address) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h -index 053481ab69ef..0c2c0fc45ae5 100644 +index 08c5a315b3a6..70d6005ecd64 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h -@@ -1947,6 +1947,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, +@@ -1939,6 +1939,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_return_code_type *operation_result); @@ -5296,7 +6575,7 @@ index 81f4c386c287..1f9e10caf456 100644 /* * Add the logic to extract BOTH power up and power down sequences diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c -index 29606fda029d..ff127f8a405b 100644 +index 60f15a9ba7a5..c3da48c213d4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -35,6 +35,8 @@ @@ -5402,10 +6681,10 @@ index e8ae7681bf0a..8a0d873983f3 100644 } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -index 8ca793c222ff..8a06a6c35a4b 100644 +index 21bd635bcdfc..6f4032038fc7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c -@@ -2822,7 +2822,10 @@ int smu_get_power_limit(void *handle, +@@ -2809,7 +2809,10 @@ int smu_get_power_limit(void *handle, *limit = smu->max_power_limit; break; case SMU_PPT_LIMIT_MIN: @@ -5417,7 +6696,7 @@ index 8ca793c222ff..8a06a6c35a4b 100644 break; default: return -EINVAL; -@@ -2846,7 +2849,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit) +@@ -2833,7 +2836,14 @@ static int smu_set_power_limit(void *handle, uint32_t limit) if (smu->ppt_funcs->set_power_limit) return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); @@ -5434,7 +6713,7 @@ index 8ca793c222ff..8a06a6c35a4b 100644 "New power limit (%d) is out of range [%d,%d]\n", limit, smu->min_power_limit, smu->max_power_limit); diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c -index 13bc4c290b17..9b741e6262bc 100644 +index 855beafb76ff..ad78059ee954 
100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -94,6 +94,8 @@ static int oui(u8 first, u8 second, u8 third) @@ -5459,7 +6738,7 @@ index 13bc4c290b17..9b741e6262bc 100644 /* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */ EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC), -@@ -6759,7 +6767,37 @@ static void update_display_info(struct drm_connector *connector, +@@ -6753,7 +6761,37 @@ static void update_display_info(struct drm_connector *connector, drm_edid_to_eld(connector, drm_edid); } @@ -5498,7 +6777,7 @@ index 13bc4c290b17..9b741e6262bc 100644 struct displayid_detailed_timings_1 *timings, bool type_7) { -@@ -6778,7 +6816,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d +@@ -6772,7 +6810,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d bool hsync_positive = (timings->hsync[1] >> 7) & 0x1; bool vsync_positive = (timings->vsync[1] >> 7) & 0x1; @@ -5507,7 +6786,7 @@ index 13bc4c290b17..9b741e6262bc 100644 if (!mode) return NULL; -@@ -6801,6 +6839,9 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d +@@ -6795,6 +6833,9 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d if (timings->flags & 0x80) mode->type |= DRM_MODE_TYPE_PREFERRED; @@ -5517,7 +6796,7 @@ index 13bc4c290b17..9b741e6262bc 100644 drm_mode_set_name(mode); return mode; -@@ -6823,7 +6864,7 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector, +@@ -6817,7 +6858,7 @@ static int add_displayid_detailed_1_modes(struct drm_connector *connector, for (i = 0; i < num_timings; i++) { struct displayid_detailed_timings_1 *timings = &det->timings[i]; @@ -5526,11 +6805,1022 @@ index 13bc4c290b17..9b741e6262bc 100644 if (!newmode) continue; +diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c +index b1be458ed4dd..28c0e76a1e88 100644 +--- a/drivers/gpu/drm/drm_format_helper.c ++++ b/drivers/gpu/drm/drm_format_helper.c +@@ -702,6 +702,57 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi + } + EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888); + ++static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels) ++{ ++ u8 *dbuf8 = dbuf; ++ const __le32 *sbuf32 = sbuf; ++ unsigned int x; ++ u32 pix; ++ ++ for (x = 0; x < pixels; x++) { ++ pix = le32_to_cpu(sbuf32[x]); ++ /* write red-green-blue to output in little endianness */ ++ *dbuf8++ = (pix & 0x00FF0000) >> 16; ++ *dbuf8++ = (pix & 0x0000FF00) >> 8; ++ *dbuf8++ = (pix & 0x000000FF) >> 0; ++ } ++} ++ ++/** ++ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer ++ * @dst: Array of BGR888 destination buffers ++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines ++ * within @dst; can be NULL if scanlines are stored next to each other. ++ * @src: Array of XRGB8888 source buffers ++ * @fb: DRM framebuffer ++ * @clip: Clip rectangle area to copy ++ * @state: Transform and conversion state ++ * ++ * This function copies parts of a framebuffer to display memory and converts the ++ * color format during the process. Destination and framebuffer formats must match. The ++ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at ++ * least as many entries as there are planes in @fb's format. Each entry stores the ++ * value for the format's respective color plane at the same index. 
++ * ++ * This function does not apply clipping on @dst (i.e. the destination is at the ++ * top-left corner). ++ * ++ * Drivers can use this function for BGR888 devices that don't natively ++ * support XRGB8888. ++ */ ++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, ++ const struct iosys_map *src, const struct drm_framebuffer *fb, ++ const struct drm_rect *clip, struct drm_format_conv_state *state) ++{ ++ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { ++ 3, ++ }; ++ ++ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, ++ drm_fb_xrgb8888_to_bgr888_line); ++} ++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888); ++ + static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels) + { + __le32 *dbuf32 = dbuf; +@@ -1035,6 +1086,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d + } else if (dst_format == DRM_FORMAT_RGB888) { + drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state); + return 0; ++ } else if (dst_format == DRM_FORMAT_BGR888) { ++ drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state); ++ return 0; + } else if (dst_format == DRM_FORMAT_ARGB8888) { + drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state); + return 0; +diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c +index 49b5cc01ce40..1435f49f2ce6 100644 +--- a/drivers/gpu/drm/i915/display/intel_ddi.c ++++ b/drivers/gpu/drm/i915/display/intel_ddi.c +@@ -4685,6 +4685,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port) + + static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) + { ++ struct intel_display *display = to_intel_display(dig_port); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + + if (dig_port->base.port != PORT_A) +@@ -4693,6 +4694,9 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port) + if (dig_port->saved_port_bits & DDI_A_4_LANES) + return false; + ++ if (intel_has_quirk(display, QUIRK_DDI_A_FORCE_4_LANES)) ++ return true; ++ + /* Broxton/Geminilake: Bspec says that DDI_A_4_LANES is the only + * supported configuration + */ +diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c +index 00852ff5b247..4c56f1b622be 100644 +--- a/drivers/gpu/drm/i915/display/intel_fbdev.c ++++ b/drivers/gpu/drm/i915/display/intel_fbdev.c +@@ -197,10 +197,10 @@ static int intelfb_create(struct drm_fb_helper *helper, + ifbdev->fb = NULL; + + if (fb && +- (sizes->fb_width > fb->base.width || +- sizes->fb_height > fb->base.height)) { ++ (sizes->fb_width != fb->base.width || ++ sizes->fb_height != fb->base.height)) { + drm_dbg_kms(&dev_priv->drm, +- "BIOS fb too small (%dx%d), we require (%dx%d)," ++ "BIOS fb not valid (%dx%d), we require (%dx%d)," + " releasing it\n", + fb->base.width, fb->base.height, + sizes->fb_width, sizes->fb_height); +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c +index 28f497ae785b..c2952b0f8b88 100644 +--- a/drivers/gpu/drm/i915/display/intel_quirks.c ++++ b/drivers/gpu/drm/i915/display/intel_quirks.c +@@ -64,6 +64,18 @@ static void quirk_increase_ddi_disabled_time(struct intel_display *display) + drm_info(display->drm, "Applying Increase DDI Disabled quirk\n"); + } + ++/* ++ * In some cases, the firmware might not set the lane count to 4 (for example, ++ * when booting in some dual GPU Macs with the dGPU as the default GPU), 
this ++ * quirk is used to force it as otherwise it might not be possible to compute a ++ * valid link configuration. ++ */ ++static void quirk_ddi_a_force_4_lanes(struct intel_display *display) ++{ ++ intel_set_quirk(display, QUIRK_DDI_A_FORCE_4_LANES); ++ drm_info(display->drm, "Applying DDI A Forced 4 Lanes quirk\n"); ++} ++ + static void quirk_no_pps_backlight_power_hook(struct intel_display *display) + { + intel_set_quirk(display, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK); +@@ -229,6 +241,9 @@ static struct intel_quirk intel_quirks[] = { + { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, + /* HP Notebook - 14-r206nv */ + { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, ++ ++ /* Apple MacBookPro15,1 */ ++ { 0x3e9b, 0x106b, 0x0176, quirk_ddi_a_force_4_lanes }, + }; + + static const struct intel_dpcd_quirk intel_dpcd_quirks[] = { +diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h +index cafdebda7535..a5296f82776e 100644 +--- a/drivers/gpu/drm/i915/display/intel_quirks.h ++++ b/drivers/gpu/drm/i915/display/intel_quirks.h +@@ -20,6 +20,7 @@ enum intel_quirk_id { + QUIRK_LVDS_SSC_DISABLE, + QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, + QUIRK_FW_SYNC_LEN, ++ QUIRK_DDI_A_FORCE_4_LANES, + }; + + void intel_init_quirks(struct intel_display *display); +diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c +index 08992636ec05..35cd3405d045 100644 +--- a/drivers/gpu/drm/tests/drm_format_helper_test.c ++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c +@@ -60,6 +60,11 @@ struct convert_to_rgb888_result { + const u8 expected[TEST_BUF_SIZE]; + }; + ++struct convert_to_bgr888_result { ++ unsigned int dst_pitch; ++ const u8 expected[TEST_BUF_SIZE]; ++}; ++ + struct convert_to_argb8888_result { + unsigned int dst_pitch; + const u32 expected[TEST_BUF_SIZE]; +@@ -107,6 +112,7 @@ struct convert_xrgb8888_case { + struct convert_to_argb1555_result argb1555_result; + struct convert_to_rgba5551_result rgba5551_result; + struct convert_to_rgb888_result rgb888_result; ++ struct convert_to_bgr888_result bgr888_result; + struct convert_to_argb8888_result argb8888_result; + struct convert_to_xrgb2101010_result xrgb2101010_result; + struct convert_to_argb2101010_result argb2101010_result; +@@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0x00, 0x00, 0xFF }, + }, ++ .bgr888_result = { ++ .dst_pitch = TEST_USE_DEFAULT_PITCH, ++ .expected = { 0xFF, 0x00, 0x00 }, ++ }, + .argb8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFFFF0000 }, +@@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0x00, 0x00, 0xFF }, + }, ++ .bgr888_result = { ++ .dst_pitch = TEST_USE_DEFAULT_PITCH, ++ .expected = { 0xFF, 0x00, 0x00 }, ++ }, + .argb8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { 0xFFFF0000 }, +@@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, + }, + }, ++ .bgr888_result = { ++ .dst_pitch = TEST_USE_DEFAULT_PITCH, ++ .expected = { ++ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, ++ 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00, ++ 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF, ++ 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF, ++ }, ++ }, + .argb8888_result = { + .dst_pitch = TEST_USE_DEFAULT_PITCH, + .expected = { +@@ -468,6 +491,17 @@ static struct 
convert_xrgb8888_case convert_xrgb8888_cases[] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, ++ .bgr888_result = { ++ .dst_pitch = 15, ++ .expected = { ++ 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, ++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ++ }, ++ }, + .argb8888_result = { + .dst_pitch = 20, + .expected = { +@@ -914,6 +948,52 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test) + KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); + } + ++static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test) ++{ ++ const struct convert_xrgb8888_case *params = test->param_value; ++ const struct convert_to_bgr888_result *result = ¶ms->bgr888_result; ++ size_t dst_size; ++ u8 *buf = NULL; ++ __le32 *xrgb8888 = NULL; ++ struct iosys_map dst, src; ++ ++ struct drm_framebuffer fb = { ++ .format = drm_format_info(DRM_FORMAT_XRGB8888), ++ .pitches = { params->pitch, 0, 0 }, ++ }; ++ ++ dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch, ++ ¶ms->clip, 0); ++ KUNIT_ASSERT_GT(test, dst_size, 0); ++ ++ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL); ++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf); ++ iosys_map_set_vaddr(&dst, buf); ++ ++ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE); ++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888); ++ iosys_map_set_vaddr(&src, xrgb8888); ++ ++ /* ++ * BGR888 expected results are already in little-endian ++ * order, so there's no need to convert the test output. ++ */ ++ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip, ++ &fmtcnv_state); ++ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); ++ ++ buf = dst.vaddr; /* restore original value of buf */ ++ memset(buf, 0, dst_size); ++ ++ int blit_result = 0; ++ ++ blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, ¶ms->clip, ++ &fmtcnv_state); ++ ++ KUNIT_EXPECT_FALSE(test, blit_result); ++ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); ++} ++ + static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test) + { + const struct convert_xrgb8888_case *params = test->param_value; +@@ -1851,6 +1931,7 @@ static struct kunit_case drm_format_helper_test_cases[] = { + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params), ++ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params), + KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params), +diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig +index 94cbdb1337c0..1201aee7bab3 100644 +--- a/drivers/gpu/drm/tiny/Kconfig ++++ b/drivers/gpu/drm/tiny/Kconfig +@@ -1,5 +1,17 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++config DRM_APPLETBDRM ++ tristate "DRM support for Apple Touch Bars" ++ depends on DRM && USB && MMU ++ select DRM_KMS_HELPER ++ select DRM_GEM_SHMEM_HELPER ++ help ++ Say Y here if you want support for the display of Touch Bars on x86 ++ MacBook Pros. 
++ ++ To compile this driver as a module, choose M here: the ++ module will be called appletbdrm. ++ + config DRM_ARCPGU + tristate "ARC PGU" + depends on DRM && OF +diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile +index 4aaf56f8707d..d9add9c3eda3 100644 +--- a/drivers/gpu/drm/tiny/Makefile ++++ b/drivers/gpu/drm/tiny/Makefile +@@ -1,5 +1,6 @@ + # SPDX-License-Identifier: GPL-2.0-only + ++obj-$(CONFIG_DRM_APPLETBDRM) += appletbdrm.o + obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o + obj-$(CONFIG_DRM_BOCHS) += bochs.o + obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o +diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c +new file mode 100644 +index 000000000000..7a74c8ad37cd +--- /dev/null ++++ b/drivers/gpu/drm/tiny/appletbdrm.c +@@ -0,0 +1,624 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Apple Touch Bar DRM Driver ++ * ++ * Copyright (c) 2023 Kerem Karabay ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define _APPLETBDRM_FOURCC(s) (((s)[0] << 24) | ((s)[1] << 16) | ((s)[2] << 8) | (s)[3]) ++#define APPLETBDRM_FOURCC(s) _APPLETBDRM_FOURCC(#s) ++ ++#define APPLETBDRM_PIXEL_FORMAT APPLETBDRM_FOURCC(RGBA) /* The actual format is BGR888 */ ++#define APPLETBDRM_BITS_PER_PIXEL 24 ++ ++#define APPLETBDRM_MSG_CLEAR_DISPLAY APPLETBDRM_FOURCC(CLRD) ++#define APPLETBDRM_MSG_GET_INFORMATION APPLETBDRM_FOURCC(GINF) ++#define APPLETBDRM_MSG_UPDATE_COMPLETE APPLETBDRM_FOURCC(UDCL) ++#define APPLETBDRM_MSG_SIGNAL_READINESS APPLETBDRM_FOURCC(REDY) ++ ++#define APPLETBDRM_BULK_MSG_TIMEOUT 1000 ++ ++#define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm) ++#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dev)) ++ ++struct appletbdrm_device { ++ struct device *dev; ++ ++ u8 in_ep; ++ u8 out_ep; ++ ++ u32 width; ++ u32 height; ++ ++ struct drm_device drm; ++ struct drm_display_mode mode; ++ struct drm_connector connector; ++ struct drm_simple_display_pipe pipe; ++ ++ bool readiness_signal_received; ++}; ++ ++struct appletbdrm_request_header { ++ __le16 unk_00; ++ __le16 unk_02; ++ __le32 unk_04; ++ __le32 unk_08; ++ __le32 size; ++} __packed; ++ ++struct appletbdrm_response_header { ++ u8 unk_00[16]; ++ u32 msg; ++} __packed; ++ ++struct appletbdrm_simple_request { ++ struct appletbdrm_request_header header; ++ u32 msg; ++ u8 unk_14[8]; ++ __le32 size; ++} __packed; ++ ++struct appletbdrm_information { ++ struct appletbdrm_response_header header; ++ u8 unk_14[12]; ++ __le32 width; ++ __le32 height; ++ u8 bits_per_pixel; ++ __le32 bytes_per_row; ++ __le32 orientation; ++ __le32 bitmap_info; ++ u32 pixel_format; ++ __le32 width_inches; /* floating point */ ++ __le32 height_inches; /* floating point */ ++} __packed; ++ ++struct appletbdrm_frame { ++ __le16 begin_x; ++ __le16 begin_y; ++ __le16 width; ++ __le16 height; ++ __le32 buf_size; ++ u8 buf[]; ++} __packed; ++ ++struct appletbdrm_fb_request_footer { ++ u8 unk_00[12]; ++ __le32 unk_0c; ++ u8 unk_10[12]; ++ __le32 unk_1c; ++ __le64 timestamp; ++ u8 unk_28[12]; ++ __le32 unk_34; ++ u8 unk_38[20]; ++ __le32 unk_4c; ++} __packed; ++ ++struct appletbdrm_fb_request { ++ struct appletbdrm_request_header header; ++ __le16 unk_10; ++ u8 msg_id; ++ u8 unk_13[29]; ++ /* ++ * Contents of `data`: ++ * - struct appletbdrm_frame frames[]; ++ * - struct appletbdrm_fb_request_footer footer; ++ * - padding to make the total 
size a multiple of 16 ++ */ ++ u8 data[]; ++} __packed; ++ ++struct appletbdrm_fb_request_response { ++ struct appletbdrm_response_header header; ++ u8 unk_14[12]; ++ __le64 timestamp; ++} __packed; ++ ++static int appletbdrm_send_request(struct appletbdrm_device *adev, ++ struct appletbdrm_request_header *request, size_t size) ++{ ++ struct usb_device *udev = adev_to_udev(adev); ++ struct drm_device *drm = &adev->drm; ++ int ret, actual_size; ++ ++ ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, adev->out_ep), ++ request, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); ++ if (ret) { ++ drm_err(drm, "Failed to send message (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ if (actual_size != size) { ++ drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", ++ actual_size, size); ++ return -EIO; ++ } ++ ++ return ret; ++} ++ ++static int appletbdrm_read_response(struct appletbdrm_device *adev, ++ struct appletbdrm_response_header *response, ++ size_t size, u32 expected_response) ++{ ++ struct usb_device *udev = adev_to_udev(adev); ++ struct drm_device *drm = &adev->drm; ++ int ret, actual_size; ++ ++retry: ++ ret = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, adev->in_ep), ++ response, size, &actual_size, APPLETBDRM_BULK_MSG_TIMEOUT); ++ if (ret) { ++ drm_err(drm, "Failed to read response (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ /* ++ * The device responds to the first request sent in a particular ++ * timeframe after the USB device configuration is set with a readiness ++ * signal, in which case the response should be read again ++ */ ++ if (response->msg == APPLETBDRM_MSG_SIGNAL_READINESS) { ++ if (!adev->readiness_signal_received) { ++ adev->readiness_signal_received = true; ++ goto retry; ++ } ++ ++ drm_err(drm, "Encountered unexpected readiness signal\n"); ++ return -EIO; ++ } ++ ++ if (actual_size != size) { ++ drm_err(drm, "Actual size (%d) doesn't match expected size (%lu)\n", ++ actual_size, size); ++ return -EIO; ++ } ++ ++ if (response->msg != expected_response) { ++ drm_err(drm, "Unexpected response from device (expected %p4ch found %p4ch)\n", ++ &expected_response, &response->msg); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++static int appletbdrm_send_msg(struct appletbdrm_device *adev, u32 msg) ++{ ++ struct appletbdrm_simple_request *request; ++ int ret; ++ ++ request = kzalloc(sizeof(*request), GFP_KERNEL); ++ if (!request) ++ return -ENOMEM; ++ ++ request->header.unk_00 = cpu_to_le16(2); ++ request->header.unk_02 = cpu_to_le16(0x1512); ++ request->header.size = cpu_to_le32(sizeof(*request) - sizeof(request->header)); ++ request->msg = msg; ++ request->size = request->header.size; ++ ++ ret = appletbdrm_send_request(adev, &request->header, sizeof(*request)); ++ ++ kfree(request); ++ ++ return ret; ++} ++ ++static int appletbdrm_clear_display(struct appletbdrm_device *adev) ++{ ++ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_CLEAR_DISPLAY); ++} ++ ++static int appletbdrm_signal_readiness(struct appletbdrm_device *adev) ++{ ++ return appletbdrm_send_msg(adev, APPLETBDRM_MSG_SIGNAL_READINESS); ++} ++ ++static int appletbdrm_get_information(struct appletbdrm_device *adev) ++{ ++ struct appletbdrm_information *info; ++ struct drm_device *drm = &adev->drm; ++ u8 bits_per_pixel; ++ u32 pixel_format; ++ int ret; ++ ++ info = kzalloc(sizeof(*info), GFP_KERNEL); ++ if (!info) ++ return -ENOMEM; ++ ++ ret = appletbdrm_send_msg(adev, APPLETBDRM_MSG_GET_INFORMATION); ++ if (ret) ++ return ret; ++ ++ ret = appletbdrm_read_response(adev, &info->header, 
sizeof(*info), ++ APPLETBDRM_MSG_GET_INFORMATION); ++ if (ret) ++ goto free_info; ++ ++ bits_per_pixel = info->bits_per_pixel; ++ pixel_format = get_unaligned(&info->pixel_format); ++ ++ adev->width = get_unaligned_le32(&info->width); ++ adev->height = get_unaligned_le32(&info->height); ++ ++ if (bits_per_pixel != APPLETBDRM_BITS_PER_PIXEL) { ++ drm_err(drm, "Encountered unexpected bits per pixel value (%d)\n", bits_per_pixel); ++ ret = -EINVAL; ++ goto free_info; ++ } ++ ++ if (pixel_format != APPLETBDRM_PIXEL_FORMAT) { ++ drm_err(drm, "Encountered unknown pixel format (%p4ch)\n", &pixel_format); ++ ret = -EINVAL; ++ goto free_info; ++ } ++ ++free_info: ++ kfree(info); ++ ++ return ret; ++} ++ ++static u32 rect_size(struct drm_rect *rect) ++{ ++ return drm_rect_width(rect) * drm_rect_height(rect) * (APPLETBDRM_BITS_PER_PIXEL / 8); ++} ++ ++static int appletbdrm_flush_damage(struct appletbdrm_device *adev, ++ struct drm_plane_state *old_state, ++ struct drm_plane_state *state) ++{ ++ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); ++ struct appletbdrm_fb_request_response *response; ++ struct appletbdrm_fb_request_footer *footer; ++ struct drm_atomic_helper_damage_iter iter; ++ struct drm_framebuffer *fb = state->fb; ++ struct appletbdrm_fb_request *request; ++ struct drm_device *drm = &adev->drm; ++ struct appletbdrm_frame *frame; ++ u64 timestamp = ktime_get_ns(); ++ struct drm_rect damage; ++ size_t frames_size = 0; ++ size_t request_size; ++ int ret; ++ ++ drm_atomic_helper_damage_iter_init(&iter, old_state, state); ++ drm_atomic_for_each_plane_damage(&iter, &damage) { ++ frames_size += struct_size(frame, buf, rect_size(&damage)); ++ } ++ ++ if (!frames_size) ++ return 0; ++ ++ request_size = ALIGN(sizeof(*request) + frames_size + sizeof(*footer), 16); ++ ++ request = kzalloc(request_size, GFP_KERNEL); ++ if (!request) ++ return -ENOMEM; ++ ++ response = kzalloc(sizeof(*response), GFP_KERNEL); ++ if (!response) { ++ ret = -ENOMEM; ++ goto free_request; ++ } ++ ++ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE); ++ if (ret) { ++ drm_err(drm, "Failed to start CPU framebuffer access (%pe)\n", ERR_PTR(ret)); ++ goto free_response; ++ } ++ ++ request->header.unk_00 = cpu_to_le16(2); ++ request->header.unk_02 = cpu_to_le16(0x12); ++ request->header.unk_04 = cpu_to_le32(9); ++ request->header.size = cpu_to_le32(request_size - sizeof(request->header)); ++ request->unk_10 = cpu_to_le16(1); ++ request->msg_id = timestamp & 0xff; ++ ++ frame = (struct appletbdrm_frame *)request->data; ++ ++ drm_atomic_helper_damage_iter_init(&iter, old_state, state); ++ drm_atomic_for_each_plane_damage(&iter, &damage) { ++ struct iosys_map dst = IOSYS_MAP_INIT_VADDR(frame->buf); ++ u32 buf_size = rect_size(&damage); ++ ++ /* ++ * The coordinates need to be translated to the coordinate ++ * system the device expects, see the comment in ++ * appletbdrm_setup_mode_config ++ */ ++ frame->begin_x = cpu_to_le16(damage.y1); ++ frame->begin_y = cpu_to_le16(adev->height - damage.x2); ++ frame->width = cpu_to_le16(drm_rect_height(&damage)); ++ frame->height = cpu_to_le16(drm_rect_width(&damage)); ++ frame->buf_size = cpu_to_le32(buf_size); ++ ++ ret = drm_fb_blit(&dst, NULL, DRM_FORMAT_BGR888, ++ &shadow_plane_state->data[0], fb, &damage, &shadow_plane_state->fmtcnv_state); ++ if (ret) { ++ drm_err(drm, "Failed to copy damage clip (%pe)\n", ERR_PTR(ret)); ++ goto end_fb_cpu_access; ++ } ++ ++ frame = (void *)frame + struct_size(frame, buf, buf_size); ++ } ++ ++ footer = (struct 
appletbdrm_fb_request_footer *)&request->data[frames_size]; ++ ++ footer->unk_0c = cpu_to_le32(0xfffe); ++ footer->unk_1c = cpu_to_le32(0x80001); ++ footer->unk_34 = cpu_to_le32(0x80002); ++ footer->unk_4c = cpu_to_le32(0xffff); ++ footer->timestamp = cpu_to_le64(timestamp); ++ ++ ret = appletbdrm_send_request(adev, &request->header, request_size); ++ if (ret) ++ goto end_fb_cpu_access; ++ ++ ret = appletbdrm_read_response(adev, &response->header, sizeof(*response), ++ APPLETBDRM_MSG_UPDATE_COMPLETE); ++ if (ret) ++ goto end_fb_cpu_access; ++ ++ if (response->timestamp != footer->timestamp) { ++ drm_err(drm, "Response timestamp (%llu) doesn't match request timestamp (%llu)\n", ++ le64_to_cpu(response->timestamp), timestamp); ++ goto end_fb_cpu_access; ++ } ++ ++end_fb_cpu_access: ++ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); ++free_response: ++ kfree(response); ++free_request: ++ kfree(request); ++ ++ return ret; ++} ++ ++static int appletbdrm_connector_helper_get_modes(struct drm_connector *connector) ++{ ++ struct appletbdrm_device *adev = drm_to_adev(connector->dev); ++ ++ return drm_connector_helper_get_modes_fixed(connector, &adev->mode); ++} ++ ++static enum drm_mode_status appletbdrm_pipe_mode_valid(struct drm_simple_display_pipe *pipe, ++ const struct drm_display_mode *mode) ++{ ++ struct drm_crtc *crtc = &pipe->crtc; ++ struct appletbdrm_device *adev = drm_to_adev(crtc->dev); ++ ++ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &adev->mode); ++} ++ ++static void appletbdrm_pipe_disable(struct drm_simple_display_pipe *pipe) ++{ ++ struct appletbdrm_device *adev = drm_to_adev(pipe->crtc.dev); ++ int idx; ++ ++ if (!drm_dev_enter(&adev->drm, &idx)) ++ return; ++ ++ appletbdrm_clear_display(adev); ++ ++ drm_dev_exit(idx); ++} ++ ++static void appletbdrm_pipe_update(struct drm_simple_display_pipe *pipe, ++ struct drm_plane_state *old_state) ++{ ++ struct drm_crtc *crtc = &pipe->crtc; ++ struct appletbdrm_device *adev = drm_to_adev(crtc->dev); ++ int idx; ++ ++ if (!crtc->state->active || !drm_dev_enter(&adev->drm, &idx)) ++ return; ++ ++ appletbdrm_flush_damage(adev, old_state, pipe->plane.state); ++ ++ drm_dev_exit(idx); ++} ++ ++static const u32 appletbdrm_formats[] = { ++ DRM_FORMAT_BGR888, ++ DRM_FORMAT_XRGB8888, /* emulated */ ++}; ++ ++static const struct drm_mode_config_funcs appletbdrm_mode_config_funcs = { ++ .fb_create = drm_gem_fb_create_with_dirty, ++ .atomic_check = drm_atomic_helper_check, ++ .atomic_commit = drm_atomic_helper_commit, ++}; ++ ++static const struct drm_connector_funcs appletbdrm_connector_funcs = { ++ .reset = drm_atomic_helper_connector_reset, ++ .destroy = drm_connector_cleanup, ++ .fill_modes = drm_helper_probe_single_connector_modes, ++ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, ++ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, ++}; ++ ++static const struct drm_connector_helper_funcs appletbdrm_connector_helper_funcs = { ++ .get_modes = appletbdrm_connector_helper_get_modes, ++}; ++ ++static const struct drm_simple_display_pipe_funcs appletbdrm_pipe_funcs = { ++ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, ++ .update = appletbdrm_pipe_update, ++ .disable = appletbdrm_pipe_disable, ++ .mode_valid = appletbdrm_pipe_mode_valid, ++}; ++ ++DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops); ++ ++static const struct drm_driver appletbdrm_drm_driver = { ++ DRM_GEM_SHMEM_DRIVER_OPS, ++ .name = "appletbdrm", ++ .desc = "Apple Touch Bar DRM Driver", ++ .date = "20230910", ++ .major = 1, ++ .minor = 0, ++ 
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, ++ .fops = &appletbdrm_drm_fops, ++}; ++ ++static int appletbdrm_setup_mode_config(struct appletbdrm_device *adev) ++{ ++ struct drm_connector *connector = &adev->connector; ++ struct drm_device *drm = &adev->drm; ++ struct device *dev = adev->dev; ++ int ret; ++ ++ ret = drmm_mode_config_init(drm); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to initialize mode configuration\n"); ++ ++ /* ++ * The coordinate system used by the device is different from the ++ * coordinate system of the framebuffer in that the x and y axes are ++ * swapped, and that the y axis is inverted; so what the device reports ++ * as the height is actually the width of the framebuffer and vice ++ * versa ++ */ ++ drm->mode_config.min_width = 0; ++ drm->mode_config.min_height = 0; ++ drm->mode_config.max_width = max(adev->height, DRM_SHADOW_PLANE_MAX_WIDTH); ++ drm->mode_config.max_height = max(adev->width, DRM_SHADOW_PLANE_MAX_HEIGHT); ++ drm->mode_config.preferred_depth = APPLETBDRM_BITS_PER_PIXEL; ++ drm->mode_config.funcs = &appletbdrm_mode_config_funcs; ++ ++ adev->mode = (struct drm_display_mode) { ++ DRM_MODE_INIT(60, adev->height, adev->width, ++ DRM_MODE_RES_MM(adev->height, 218), ++ DRM_MODE_RES_MM(adev->width, 218)) ++ }; ++ ++ ret = drm_connector_init(drm, connector, ++ &appletbdrm_connector_funcs, DRM_MODE_CONNECTOR_USB); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to initialize connector\n"); ++ ++ drm_connector_helper_add(connector, &appletbdrm_connector_helper_funcs); ++ ++ ret = drm_connector_set_panel_orientation(connector, ++ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to set panel orientation\n"); ++ ++ connector->display_info.non_desktop = true; ++ ret = drm_object_property_set_value(&connector->base, ++ drm->mode_config.non_desktop_property, true); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to set non-desktop property\n"); ++ ++ ret = drm_simple_display_pipe_init(drm, &adev->pipe, &appletbdrm_pipe_funcs, ++ appletbdrm_formats, ARRAY_SIZE(appletbdrm_formats), ++ NULL, &adev->connector); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to initialize simple display pipe\n"); ++ ++ drm_plane_enable_fb_damage_clips(&adev->pipe.plane); ++ ++ drm_mode_config_reset(drm); ++ ++ ret = drm_dev_register(drm, 0); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to register DRM device\n"); ++ ++ return 0; ++} ++ ++static int appletbdrm_probe(struct usb_interface *intf, ++ const struct usb_device_id *id) ++{ ++ struct usb_endpoint_descriptor *bulk_in, *bulk_out; ++ struct device *dev = &intf->dev; ++ struct appletbdrm_device *adev; ++ int ret; ++ ++ ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to find bulk endpoints\n"); ++ ++ adev = devm_drm_dev_alloc(dev, &appletbdrm_drm_driver, struct appletbdrm_device, drm); ++ if (IS_ERR(adev)) ++ return PTR_ERR(adev); ++ ++ adev->dev = dev; ++ adev->in_ep = bulk_in->bEndpointAddress; ++ adev->out_ep = bulk_out->bEndpointAddress; ++ ++ usb_set_intfdata(intf, adev); ++ ++ ret = appletbdrm_get_information(adev); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to get display information\n"); ++ ++ ret = appletbdrm_signal_readiness(adev); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to signal readiness\n"); ++ ++ ret = appletbdrm_clear_display(adev); ++ if (ret) ++ return dev_err_probe(dev, ret, "Failed to clear 
display\n"); ++ ++ return appletbdrm_setup_mode_config(adev); ++} ++ ++static void appletbdrm_disconnect(struct usb_interface *intf) ++{ ++ struct appletbdrm_device *adev = usb_get_intfdata(intf); ++ struct drm_device *drm = &adev->drm; ++ ++ drm_dev_unplug(drm); ++ drm_atomic_helper_shutdown(drm); ++} ++ ++static void appletbdrm_shutdown(struct usb_interface *intf) ++{ ++ struct appletbdrm_device *adev = usb_get_intfdata(intf); ++ ++ /* ++ * The framebuffer needs to be cleared on shutdown since its content ++ * persists across boots ++ */ ++ drm_atomic_helper_shutdown(&adev->drm); ++} ++ ++static const struct usb_device_id appletbdrm_usb_id_table[] = { ++ { USB_DEVICE_INTERFACE_CLASS(0x05ac, 0x8302, USB_CLASS_AUDIO_VIDEO) }, ++ {} ++}; ++MODULE_DEVICE_TABLE(usb, appletbdrm_usb_id_table); ++ ++static struct usb_driver appletbdrm_usb_driver = { ++ .name = "appletbdrm", ++ .probe = appletbdrm_probe, ++ .disconnect = appletbdrm_disconnect, ++ .shutdown = appletbdrm_shutdown, ++ .id_table = appletbdrm_usb_id_table, ++}; ++module_usb_driver(appletbdrm_usb_driver); ++ ++MODULE_AUTHOR("Kerem Karabay "); ++MODULE_DESCRIPTION("Apple Touch Bar DRM Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c +index 18f2c92beff8..3de1bca45ed2 100644 +--- a/drivers/gpu/vga/vga_switcheroo.c ++++ b/drivers/gpu/vga/vga_switcheroo.c +@@ -438,12 +438,7 @@ find_active_client(struct list_head *head) + bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) + { + if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { +- /* +- * apple-gmux is needed on pre-retina MacBook Pro +- * to probe the panel if pdev is the inactive GPU. +- */ +- if (apple_gmux_present() && pdev != vga_default_device() && +- !vgasr_priv.handler_flags) ++ if (apple_gmux_present() && !vgasr_priv.handler_flags) + return true; + } + diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig -index 4d2a89d65b65..89357822c27b 100644 +index 4d2a89d65b65..79426232a460 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig -@@ -164,6 +164,15 @@ config HID_ASUS +@@ -148,6 +148,31 @@ config HID_APPLEIR + + Say Y here if you want support for Apple infrared remote control. + ++config HID_APPLETB_BL ++ tristate "Apple Touch Bar Backlight" ++ depends on BACKLIGHT_CLASS_DEVICE ++ help ++ Say Y here if you want support for the backlight of Touch Bars on x86 ++ MacBook Pros. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called hid-appletb-bl. ++ ++config HID_APPLETB_KBD ++ tristate "Apple Touch Bar Keyboard Mode" ++ depends on USB_HID ++ depends on BACKLIGHT_CLASS_DEVICE ++ depends on INPUT ++ select INPUT_SPARSEKMAP ++ select HID_APPLETB_BL ++ help ++ Say Y here if you want support for the keyboard mode (escape, ++ function, media and brightness keys) of Touch Bars on x86 MacBook ++ Pros. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called hid-appletb-kbd. 
++ + config HID_ASUS + tristate "Asus" + depends on USB_HID +@@ -164,6 +189,15 @@ config HID_ASUS - GL553V series - GL753V series @@ -5546,18 +7836,755 @@ index 4d2a89d65b65..89357822c27b 100644 config HID_AUREAL tristate "Aureal" help +@@ -741,6 +775,7 @@ config HID_MULTITOUCH + Say Y here if you have one of the following devices: + - 3M PCT touch screens + - ActionStar dual touch panels ++ - Touch Bars on x86 MacBook Pros + - Atmel panels + - Cando dual touch panels + - Chunghwa panels diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile -index 24de45f3677d..f338c9eb4600 100644 +index 24de45f3677d..a6a50ebf9b17 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile -@@ -31,6 +31,7 @@ obj-$(CONFIG_HID_APPLE) += hid-apple.o +@@ -29,8 +29,11 @@ obj-$(CONFIG_HID_ALPS) += hid-alps.o + obj-$(CONFIG_HID_ACRUX) += hid-axff.o + obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_APPLEIR) += hid-appleir.o ++obj-$(CONFIG_HID_APPLETB_BL) += hid-appletb-bl.o ++obj-$(CONFIG_HID_APPLETB_KBD) += hid-appletb-kbd.o obj-$(CONFIG_HID_CREATIVE_SB0540) += hid-creative-sb0540.o obj-$(CONFIG_HID_ASUS) += hid-asus.o +obj-$(CONFIG_HID_ASUS_ALLY) += hid-asus-ally.o obj-$(CONFIG_HID_AUREAL) += hid-aureal.o obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o +diff --git a/drivers/hid/hid-appletb-bl.c b/drivers/hid/hid-appletb-bl.c +new file mode 100644 +index 000000000000..819157686e59 +--- /dev/null ++++ b/drivers/hid/hid-appletb-bl.c +@@ -0,0 +1,207 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Apple Touch Bar Backlight Driver ++ * ++ * Copyright (c) 2017-2018 Ronald Tschalär ++ * Copyright (c) 2022-2023 Kerem Karabay ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++ ++#include "hid-ids.h" ++ ++#define APPLETB_BL_ON 1 ++#define APPLETB_BL_DIM 3 ++#define APPLETB_BL_OFF 4 ++ ++#define HID_UP_APPLEVENDOR_TB_BL 0xff120000 ++ ++#define HID_VD_APPLE_TB_BRIGHTNESS 0xff120001 ++#define HID_USAGE_AUX1 0xff120020 ++#define HID_USAGE_BRIGHTNESS 0xff120021 ++ ++static int appletb_bl_def_brightness = 2; ++module_param_named(brightness, appletb_bl_def_brightness, int, 0444); ++MODULE_PARM_DESC(brightness, "Default brightness:\n" ++ " 0 - Touchbar is off\n" ++ " 1 - Dim brightness\n" ++ " [2] - Full brightness"); ++ ++struct appletb_bl { ++ struct hid_field *aux1_field, *brightness_field; ++ struct backlight_device *bdev; ++ ++ bool full_on; ++}; ++ ++static const u8 appletb_bl_brightness_map[] = { ++ APPLETB_BL_OFF, ++ APPLETB_BL_DIM, ++ APPLETB_BL_ON, ++}; ++ ++static int appletb_bl_set_brightness(struct appletb_bl *bl, u8 brightness) ++{ ++ struct hid_report *report = bl->brightness_field->report; ++ struct hid_device *hdev = report->device; ++ int ret; ++ ++ ret = hid_set_field(bl->aux1_field, 0, 1); ++ if (ret) { ++ hid_err(hdev, "Failed to set auxiliary field (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ ret = hid_set_field(bl->brightness_field, 0, brightness); ++ if (ret) { ++ hid_err(hdev, "Failed to set brightness field (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ if (!bl->full_on) { ++ ret = hid_hw_power(hdev, PM_HINT_FULLON); ++ if (ret < 0) { ++ hid_err(hdev, "Device didn't power on (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ bl->full_on = true; ++ } ++ ++ hid_hw_request(hdev, report, HID_REQ_SET_REPORT); ++ ++ if (brightness == APPLETB_BL_OFF) { ++ hid_hw_power(hdev, PM_HINT_NORMAL); ++ bl->full_on = false; ++ } ++ ++ return 0; ++} ++ ++static int appletb_bl_update_status(struct backlight_device *bdev) 
++{ ++ struct appletb_bl *bl = bl_get_data(bdev); ++ u8 brightness; ++ ++ if (backlight_is_blank(bdev)) ++ brightness = APPLETB_BL_OFF; ++ else ++ brightness = appletb_bl_brightness_map[backlight_get_brightness(bdev)]; ++ ++ return appletb_bl_set_brightness(bl, brightness); ++} ++ ++static const struct backlight_ops appletb_bl_backlight_ops = { ++ .options = BL_CORE_SUSPENDRESUME, ++ .update_status = appletb_bl_update_status, ++}; ++ ++static int appletb_bl_probe(struct hid_device *hdev, const struct hid_device_id *id) ++{ ++ struct hid_field *aux1_field, *brightness_field; ++ struct backlight_properties bl_props = { 0 }; ++ struct device *dev = &hdev->dev; ++ struct appletb_bl *bl; ++ int ret; ++ ++ ret = hid_parse(hdev); ++ if (ret) ++ return dev_err_probe(dev, ret, "HID parse failed\n"); ++ ++ aux1_field = hid_find_field(hdev, HID_FEATURE_REPORT, ++ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_AUX1); ++ ++ brightness_field = hid_find_field(hdev, HID_FEATURE_REPORT, ++ HID_VD_APPLE_TB_BRIGHTNESS, HID_USAGE_BRIGHTNESS); ++ ++ if (!aux1_field || !brightness_field) ++ return -ENODEV; ++ ++ if (aux1_field->report != brightness_field->report) ++ return dev_err_probe(dev, -ENODEV, "Encountered unexpected report structure\n"); ++ ++ bl = devm_kzalloc(dev, sizeof(*bl), GFP_KERNEL); ++ if (!bl) ++ return -ENOMEM; ++ ++ ret = hid_hw_start(hdev, HID_CONNECT_DRIVER); ++ if (ret) ++ return dev_err_probe(dev, ret, "HID hardware start failed\n"); ++ ++ ret = hid_hw_open(hdev); ++ if (ret) { ++ dev_err_probe(dev, ret, "HID hardware open failed\n"); ++ goto stop_hw; ++ } ++ ++ bl->aux1_field = aux1_field; ++ bl->brightness_field = brightness_field; ++ ++ if (appletb_bl_def_brightness == 0) ++ ret = appletb_bl_set_brightness(bl, APPLETB_BL_OFF); ++ else if (appletb_bl_def_brightness == 1) ++ ret = appletb_bl_set_brightness(bl, APPLETB_BL_DIM); ++ else ++ ret = appletb_bl_set_brightness(bl, APPLETB_BL_ON); ++ ++ if (ret) { ++ dev_err_probe(dev, ret, "Failed to set touch bar brightness to off\n"); ++ goto close_hw; ++ } ++ ++ bl_props.type = BACKLIGHT_RAW; ++ bl_props.max_brightness = ARRAY_SIZE(appletb_bl_brightness_map) - 1; ++ ++ bl->bdev = devm_backlight_device_register(dev, "appletb_backlight", dev, bl, ++ &appletb_bl_backlight_ops, &bl_props); ++ if (IS_ERR(bl->bdev)) { ++ ret = PTR_ERR(bl->bdev); ++ dev_err_probe(dev, ret, "Failed to register backlight device\n"); ++ goto close_hw; ++ } ++ ++ hid_set_drvdata(hdev, bl); ++ ++ return 0; ++ ++close_hw: ++ hid_hw_close(hdev); ++stop_hw: ++ hid_hw_stop(hdev); ++ ++ return ret; ++} ++ ++static void appletb_bl_remove(struct hid_device *hdev) ++{ ++ struct appletb_bl *bl = hid_get_drvdata(hdev); ++ ++ appletb_bl_set_brightness(bl, APPLETB_BL_OFF); ++ ++ hid_hw_close(hdev); ++ hid_hw_stop(hdev); ++} ++ ++static const struct hid_device_id appletb_bl_hid_ids[] = { ++ /* MacBook Pro's 2018, 2019, with T2 chip: iBridge DFR Brightness */ ++ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) }, ++ { } ++}; ++MODULE_DEVICE_TABLE(hid, appletb_bl_hid_ids); ++ ++static struct hid_driver appletb_bl_hid_driver = { ++ .name = "hid-appletb-bl", ++ .id_table = appletb_bl_hid_ids, ++ .probe = appletb_bl_probe, ++ .remove = appletb_bl_remove, ++}; ++module_hid_driver(appletb_bl_hid_driver); ++ ++MODULE_AUTHOR("Ronald Tschalär"); ++MODULE_AUTHOR("Kerem Karabay "); ++MODULE_DESCRIPTION("MacBookPro Touch Bar Backlight Driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c +new file mode 100644 +index 
000000000000..fa28a691da6a +--- /dev/null ++++ b/drivers/hid/hid-appletb-kbd.c +@@ -0,0 +1,506 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Apple Touch Bar Keyboard Mode Driver ++ * ++ * Copyright (c) 2017-2018 Ronald Tschalär ++ * Copyright (c) 2022-2023 Kerem Karabay ++ * Copyright (c) 2024 Aditya Garg ++ */ ++ ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "hid-ids.h" ++ ++#define APPLETB_KBD_MODE_ESC 0 ++#define APPLETB_KBD_MODE_FN 1 ++#define APPLETB_KBD_MODE_SPCL 2 ++#define APPLETB_KBD_MODE_OFF 3 ++#define APPLETB_KBD_MODE_MAX APPLETB_KBD_MODE_OFF ++ ++#define APPLETB_DEVID_KEYBOARD 1 ++#define APPLETB_DEVID_TRACKPAD 2 ++ ++#define HID_USAGE_MODE 0x00ff0004 ++ ++static int appletb_tb_def_mode = APPLETB_KBD_MODE_SPCL; ++module_param_named(mode, appletb_tb_def_mode, int, 0444); ++MODULE_PARM_DESC(mode, "Default touchbar mode:\n" ++ " 0 - escape key only\n" ++ " 1 - function-keys\n" ++ " [2] - special keys"); ++ ++static bool appletb_tb_fn_toggle = true; ++module_param_named(fntoggle, appletb_tb_fn_toggle, bool, 0644); ++MODULE_PARM_DESC(fntoggle, "Switch between Fn and media controls on pressing Fn key"); ++ ++static bool appletb_tb_autodim = true; ++module_param_named(autodim, appletb_tb_autodim, bool, 0644); ++MODULE_PARM_DESC(autodim, "Automatically dim and turn off the Touch Bar after some time"); ++ ++static int appletb_tb_dim_timeout = 60; ++module_param_named(dim_timeout, appletb_tb_dim_timeout, int, 0644); ++MODULE_PARM_DESC(dim_timeout, "Dim timeout in sec"); ++ ++static int appletb_tb_idle_timeout = 15; ++module_param_named(idle_timeout, appletb_tb_idle_timeout, int, 0644); ++MODULE_PARM_DESC(idle_timeout, "Idle timeout in sec"); ++ ++struct appletb_kbd { ++ struct hid_field *mode_field; ++ struct input_handler inp_handler; ++ struct input_handle kbd_handle; ++ struct input_handle tpd_handle; ++ struct backlight_device *backlight_dev; ++ struct timer_list inactivity_timer; ++ bool has_dimmed; ++ bool has_turned_off; ++ u8 saved_mode; ++ u8 current_mode; ++}; ++ ++static const struct key_entry appletb_kbd_keymap[] = { ++ { KE_KEY, KEY_ESC, { KEY_ESC } }, ++ { KE_KEY, KEY_F1, { KEY_BRIGHTNESSDOWN } }, ++ { KE_KEY, KEY_F2, { KEY_BRIGHTNESSUP } }, ++ { KE_KEY, KEY_F3, { KEY_RESERVED } }, ++ { KE_KEY, KEY_F4, { KEY_RESERVED } }, ++ { KE_KEY, KEY_F5, { KEY_KBDILLUMDOWN } }, ++ { KE_KEY, KEY_F6, { KEY_KBDILLUMUP } }, ++ { KE_KEY, KEY_F7, { KEY_PREVIOUSSONG } }, ++ { KE_KEY, KEY_F8, { KEY_PLAYPAUSE } }, ++ { KE_KEY, KEY_F9, { KEY_NEXTSONG } }, ++ { KE_KEY, KEY_F10, { KEY_MUTE } }, ++ { KE_KEY, KEY_F11, { KEY_VOLUMEDOWN } }, ++ { KE_KEY, KEY_F12, { KEY_VOLUMEUP } }, ++ { KE_END, 0 } ++}; ++ ++static int appletb_kbd_set_mode(struct appletb_kbd *kbd, u8 mode) ++{ ++ struct hid_report *report = kbd->mode_field->report; ++ struct hid_device *hdev = report->device; ++ int ret; ++ ++ ret = hid_hw_power(hdev, PM_HINT_FULLON); ++ if (ret) { ++ hid_err(hdev, "Device didn't resume (%pe)\n", ERR_PTR(ret)); ++ return ret; ++ } ++ ++ ret = hid_set_field(kbd->mode_field, 0, mode); ++ if (ret) { ++ hid_err(hdev, "Failed to set mode field to %u (%pe)\n", mode, ERR_PTR(ret)); ++ goto power_normal; ++ } ++ ++ hid_hw_request(hdev, report, HID_REQ_SET_REPORT); ++ ++ kbd->current_mode = mode; ++ ++power_normal: ++ hid_hw_power(hdev, PM_HINT_NORMAL); ++ ++ return ret; ++} ++ ++static ssize_t mode_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ 
struct appletb_kbd *kbd = dev_get_drvdata(dev); ++ ++ return sysfs_emit(buf, "%d\n", kbd->current_mode); ++} ++ ++static ssize_t mode_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct appletb_kbd *kbd = dev_get_drvdata(dev); ++ u8 mode; ++ int ret; ++ ++ ret = kstrtou8(buf, 0, &mode); ++ if (ret) ++ return ret; ++ ++ if (mode > APPLETB_KBD_MODE_MAX) ++ return -EINVAL; ++ ++ ret = appletb_kbd_set_mode(kbd, mode); ++ ++ return ret < 0 ? ret : size; ++} ++static DEVICE_ATTR_RW(mode); ++ ++struct attribute *appletb_kbd_attrs[] = { ++ &dev_attr_mode.attr, ++ NULL ++}; ++ATTRIBUTE_GROUPS(appletb_kbd); ++ ++static int appletb_tb_key_to_slot(unsigned int code) ++{ ++ switch (code) { ++ case KEY_ESC: ++ return 0; ++ case KEY_F1 ... KEY_F10: ++ return code - KEY_F1 + 1; ++ case KEY_F11 ... KEY_F12: ++ return code - KEY_F11 + 11; ++ ++ default: ++ return -EINVAL; ++ } ++} ++ ++static void appletb_inactivity_timer(struct timer_list *t) ++{ ++ struct appletb_kbd *kbd = from_timer(kbd, t, inactivity_timer); ++ ++ if (kbd->backlight_dev && appletb_tb_autodim) { ++ if (!kbd->has_dimmed) { ++ backlight_device_set_brightness(kbd->backlight_dev, 1); ++ kbd->has_dimmed = true; ++ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_idle_timeout * 1000)); ++ } else if (!kbd->has_turned_off) { ++ backlight_device_set_brightness(kbd->backlight_dev, 0); ++ kbd->has_turned_off = true; ++ } ++ } ++} ++ ++static void reset_inactivity_timer(struct appletb_kbd *kbd) ++{ ++ if (kbd->backlight_dev && appletb_tb_autodim) { ++ if (kbd->has_dimmed || kbd->has_turned_off) { ++ backlight_device_set_brightness(kbd->backlight_dev, 2); ++ kbd->has_dimmed = false; ++ kbd->has_turned_off = false; ++ } ++ mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000)); ++ } ++} ++ ++static int appletb_kbd_hid_event(struct hid_device *hdev, struct hid_field *field, ++ struct hid_usage *usage, __s32 value) ++{ ++ struct appletb_kbd *kbd = hid_get_drvdata(hdev); ++ struct key_entry *translation; ++ struct input_dev *input; ++ int slot; ++ ++ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD || usage->type != EV_KEY) ++ return 0; ++ ++ input = field->hidinput->input; ++ ++ /* ++ * Skip non-touch-bar keys. ++ * ++ * Either the touch bar itself or usbhid generate a slew of key-down ++ * events for all the meta keys. None of which we're at all interested ++ * in. 
++static int appletb_kbd_hid_event(struct hid_device *hdev, struct hid_field *field,
++				 struct hid_usage *usage, __s32 value)
++{
++	struct appletb_kbd *kbd = hid_get_drvdata(hdev);
++	struct key_entry *translation;
++	struct input_dev *input;
++	int slot;
++
++	if ((usage->hid & HID_USAGE_PAGE) != HID_UP_KEYBOARD || usage->type != EV_KEY)
++		return 0;
++
++	input = field->hidinput->input;
++
++	/*
++	 * Skip non-touch-bar keys.
++	 *
++	 * Either the touch bar itself or usbhid generate a slew of key-down
++	 * events for all the meta keys. None of which we're at all interested
++	 * in.
++	 */
++	slot = appletb_tb_key_to_slot(usage->code);
++	if (slot < 0)
++		return 0;
++
++	reset_inactivity_timer(kbd);
++
++	translation = sparse_keymap_entry_from_scancode(input, usage->code);
++
++	if (translation && kbd->current_mode == APPLETB_KBD_MODE_SPCL) {
++		input_event(input, usage->type, translation->keycode, value);
++
++		return 1;
++	}
++
++	return kbd->current_mode == APPLETB_KBD_MODE_OFF;
++}
++
++static void appletb_kbd_inp_event(struct input_handle *handle, unsigned int type,
++				  unsigned int code, int value)
++{
++	struct appletb_kbd *kbd = handle->private;
++
++	reset_inactivity_timer(kbd);
++
++	if (type == EV_KEY && code == KEY_FN && appletb_tb_fn_toggle) {
++		if (value == 1) {
++			kbd->saved_mode = kbd->current_mode;
++			if (kbd->current_mode == APPLETB_KBD_MODE_SPCL)
++				appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_FN);
++			else if (kbd->current_mode == APPLETB_KBD_MODE_FN)
++				appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_SPCL);
++		} else if (value == 0) {
++			if (kbd->saved_mode != kbd->current_mode)
++				appletb_kbd_set_mode(kbd, kbd->saved_mode);
++		}
++	}
++}
++
++static int appletb_kbd_inp_connect(struct input_handler *handler,
++				   struct input_dev *dev,
++				   const struct input_device_id *id)
++{
++	struct appletb_kbd *kbd = handler->private;
++	struct input_handle *handle;
++	int rc;
++
++	if (id->driver_info == APPLETB_DEVID_KEYBOARD) {
++		handle = &kbd->kbd_handle;
++		handle->name = "tbkbd";
++	} else if (id->driver_info == APPLETB_DEVID_TRACKPAD) {
++		handle = &kbd->tpd_handle;
++		handle->name = "tbtpd";
++	} else {
++		return -ENOENT;
++	}
++
++	if (handle->dev)
++		return -EEXIST;
++
++	handle->open = 0;
++	handle->dev = input_get_device(dev);
++	handle->handler = handler;
++	handle->private = kbd;
++
++	rc = input_register_handle(handle);
++	if (rc)
++		goto err_free_dev;
++
++	rc = input_open_device(handle);
++	if (rc)
++		goto err_unregister_handle;
++
++	return 0;
++
++ err_unregister_handle:
++	input_unregister_handle(handle);
++ err_free_dev:
++	input_put_device(handle->dev);
++	handle->dev = NULL;
++	return rc;
++}
++
++static void appletb_kbd_inp_disconnect(struct input_handle *handle)
++{
++	input_close_device(handle);
++	input_unregister_handle(handle);
++
++	input_put_device(handle->dev);
++	handle->dev = NULL;
++}
++
++static int appletb_kbd_input_configured(struct hid_device *hdev, struct hid_input *hidinput)
++{
++	int idx;
++	struct input_dev *input = hidinput->input;
++
++	/*
++	 * Clear various input capabilities that are blindly set by the hid
++	 * driver (usbkbd.c)
++	 */
++	memset(input->evbit, 0, sizeof(input->evbit));
++	memset(input->keybit, 0, sizeof(input->keybit));
++	memset(input->ledbit, 0, sizeof(input->ledbit));
++
++	__set_bit(EV_REP, input->evbit);
++
++	sparse_keymap_setup(input, appletb_kbd_keymap, NULL);
++
++	for (idx = 0; appletb_kbd_keymap[idx].type != KE_END; idx++)
++		input_set_capability(input, EV_KEY, appletb_kbd_keymap[idx].code);
++
++	return 0;
++}
++
++static const struct input_device_id appletb_kbd_input_devices[] = {
++	{
++		.flags = INPUT_DEVICE_ID_MATCH_BUS |
++			INPUT_DEVICE_ID_MATCH_VENDOR |
++			INPUT_DEVICE_ID_MATCH_KEYBIT,
++		.bustype = BUS_USB,
++		.vendor = USB_VENDOR_ID_APPLE,
++		.keybit = { [BIT_WORD(KEY_FN)] = BIT_MASK(KEY_FN) },
++		.driver_info = APPLETB_DEVID_KEYBOARD,
++	},
++	{
++		.flags = INPUT_DEVICE_ID_MATCH_BUS |
++			INPUT_DEVICE_ID_MATCH_VENDOR |
++			INPUT_DEVICE_ID_MATCH_KEYBIT,
++		.bustype = BUS_USB,
++		.vendor = USB_VENDOR_ID_APPLE,
++		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
++		.driver_info = APPLETB_DEVID_TRACKPAD,
++	},
++	{ }
++};
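++
++/*
++ * The id table above deliberately matches by capability bit (KEY_FN for
++ * the keyboard, BTN_TOUCH for the trackpad) plus the Apple vendor id
++ * rather than by product id; the match callback below then narrows the
++ * match down to the built-in devices via the USB product string.
++ */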
++static bool appletb_kbd_match_internal_device(struct input_handler *handler,
++					      struct input_dev *inp_dev)
++{
++	struct device *dev = &inp_dev->dev;
++
++	/* in kernel: dev && !is_usb_device(dev) */
++	while (dev && !(dev->type && dev->type->name &&
++			!strcmp(dev->type->name, "usb_device")))
++		dev = dev->parent;
++
++	/*
++	 * Apple labels all their internal keyboards and trackpads as such,
++	 * instead of maintaining an ever expanding list of product-id's we
++	 * just look at the device's product name.
++	 */
++	if (dev)
++		return !!strstr(to_usb_device(dev)->product, "Internal Keyboard");
++
++	return false;
++}
++
++static int appletb_kbd_probe(struct hid_device *hdev, const struct hid_device_id *id)
++{
++	struct appletb_kbd *kbd;
++	struct device *dev = &hdev->dev;
++	struct hid_field *mode_field;
++	int ret;
++
++	ret = hid_parse(hdev);
++	if (ret)
++		return dev_err_probe(dev, ret, "HID parse failed\n");
++
++	mode_field = hid_find_field(hdev, HID_OUTPUT_REPORT,
++				    HID_GD_KEYBOARD, HID_USAGE_MODE);
++	if (!mode_field)
++		return -ENODEV;
++
++	kbd = devm_kzalloc(dev, sizeof(*kbd), GFP_KERNEL);
++	if (!kbd)
++		return -ENOMEM;
++
++	kbd->mode_field = mode_field;
++
++	ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT);
++	if (ret)
++		return dev_err_probe(dev, ret, "HID hw start failed\n");
++
++	ret = hid_hw_open(hdev);
++	if (ret) {
++		dev_err_probe(dev, ret, "HID hw open failed\n");
++		goto stop_hw;
++	}
++
++	kbd->backlight_dev = backlight_device_get_by_name("appletb_backlight");
++	if (!kbd->backlight_dev)
++		dev_err_probe(dev, ret, "Failed to get backlight device\n");
++	else {
++		backlight_device_set_brightness(kbd->backlight_dev, 2);
++		timer_setup(&kbd->inactivity_timer, appletb_inactivity_timer, 0);
++		mod_timer(&kbd->inactivity_timer, jiffies + msecs_to_jiffies(appletb_tb_dim_timeout * 1000));
++	}
++
++	kbd->inp_handler.event = appletb_kbd_inp_event;
++	kbd->inp_handler.connect = appletb_kbd_inp_connect;
++	kbd->inp_handler.disconnect = appletb_kbd_inp_disconnect;
++	kbd->inp_handler.name = "appletb";
++	kbd->inp_handler.id_table = appletb_kbd_input_devices;
++	kbd->inp_handler.match = appletb_kbd_match_internal_device;
++	kbd->inp_handler.private = kbd;
++
++	ret = input_register_handler(&kbd->inp_handler);
++	if (ret) {
++		dev_err_probe(dev, ret, "Unable to register keyboard handler\n");
++		goto close_hw;
++	}
++
++	ret = appletb_kbd_set_mode(kbd, appletb_tb_def_mode);
++	if (ret) {
++		dev_err_probe(dev, ret, "Failed to set touchbar mode\n");
++		goto close_hw;
++	}
++
++	hid_set_drvdata(hdev, kbd);
++
++	return 0;
++
++close_hw:
++	hid_hw_close(hdev);
++stop_hw:
++	hid_hw_stop(hdev);
++	return ret;
++}
++
++static void appletb_kbd_remove(struct hid_device *hdev)
++{
++	struct appletb_kbd *kbd = hid_get_drvdata(hdev);
++
++	appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
++
++	input_unregister_handler(&kbd->inp_handler);
++	del_timer_sync(&kbd->inactivity_timer);
++
++	hid_hw_close(hdev);
++	hid_hw_stop(hdev);
++}
++
++#ifdef CONFIG_PM
++static int appletb_kbd_suspend(struct hid_device *hdev, pm_message_t msg)
++{
++	struct appletb_kbd *kbd = hid_get_drvdata(hdev);
++
++	kbd->saved_mode = kbd->current_mode;
++	appletb_kbd_set_mode(kbd, APPLETB_KBD_MODE_OFF);
++
++	return 0;
++}
++
++static int appletb_kbd_reset_resume(struct hid_device *hdev)
++{
++	struct appletb_kbd *kbd = hid_get_drvdata(hdev);
++
++	appletb_kbd_set_mode(kbd, kbd->saved_mode);
++
++	return 0;
++}
++#endif
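++
++/*
++ * Suspend policy: the active mode is stashed in saved_mode and the bar
++ * is forced off; reset_resume() then restores the saved mode after the
++ * device has been reset and re-initialised.
++ */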
++static const struct hid_device_id appletb_kbd_hid_ids[] = {
++	/* MacBook Pro's 2018, 2019, with T2 chip: iBridge Display */
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
++	{ }
++};
++MODULE_DEVICE_TABLE(hid, appletb_kbd_hid_ids);
++
++static struct hid_driver appletb_kbd_hid_driver = {
++	.name = "hid-appletb-kbd",
++	.id_table = appletb_kbd_hid_ids,
++	.probe = appletb_kbd_probe,
++	.remove = appletb_kbd_remove,
++	.event = appletb_kbd_hid_event,
++	.input_configured = appletb_kbd_input_configured,
++#ifdef CONFIG_PM
++	.suspend = appletb_kbd_suspend,
++	.reset_resume = appletb_kbd_reset_resume,
++#endif
++	.driver.dev_groups = appletb_kbd_groups,
++};
++module_hid_driver(appletb_kbd_hid_driver);
++
++/* The backlight driver should be loaded before the keyboard driver is initialised */
++MODULE_SOFTDEP("pre: hid_appletb_bl");
++
++MODULE_AUTHOR("Ronald Tschalär");
++MODULE_AUTHOR("Kerem Karabay <kekrby@gmail.com>");
++MODULE_DESCRIPTION("MacBookPro Touch Bar Keyboard Mode Driver");
++MODULE_LICENSE("GPL");
 diff --git a/drivers/hid/hid-asus-ally.c b/drivers/hid/hid-asus-ally.c
 new file mode 100644
 index 000000000000..d59316001f50
@@ -8331,8 +11358,208 @@ index 1f47fda809b9..6c2df0d37b3b 100644
  #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY		0x1abe
  #define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X		0x1b4c
  #define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD	0x196b
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 785743036647..2c3845cbb451 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -73,6 +73,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_FORCE_MULTI_INPUT	BIT(20)
+ #define MT_QUIRK_DISABLE_WAKEUP	BIT(21)
+ #define MT_QUIRK_ORIENTATION_INVERT	BIT(22)
++#define MT_QUIRK_TOUCH_IS_TIPSTATE	BIT(23)
+ 
+ #define MT_INPUTMODE_TOUCHSCREEN	0x02
+ #define MT_INPUTMODE_TOUCHPAD	0x03
+@@ -153,6 +154,7 @@ struct mt_class {
+ 	__s32 sn_height;	/* Signal/noise ratio for height events */
+ 	__s32 sn_pressure;	/* Signal/noise ratio for pressure events */
+ 	__u8 maxcontacts;
++	bool is_direct;	/* true for touchscreens */
+ 	bool is_indirect;	/* true for touchpads */
+ 	bool export_all_inputs;	/* do not ignore mouse, keyboards, etc... */
+ };
+@@ -220,6 +222,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_GOOGLE				0x0111
+ #define MT_CLS_RAZER_BLADE_STEALTH		0x0112
+ #define MT_CLS_SMART_TECH			0x0113
++#define MT_CLS_APPLE_TOUCHBAR			0x0114
+ #define MT_CLS_SIS				0x0457
+ 
+ #define MT_DEFAULT_MAXCONTACT	10
+@@ -405,6 +408,13 @@ static const struct mt_class mt_classes[] = {
+ 			MT_QUIRK_CONTACT_CNT_ACCURATE |
+ 			MT_QUIRK_SEPARATE_APP_REPORT,
+ 	},
++	{ .name = MT_CLS_APPLE_TOUCHBAR,
++		.quirks = MT_QUIRK_HOVERING |
++			MT_QUIRK_TOUCH_IS_TIPSTATE |
++			MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE,
++		.is_direct = true,
++		.maxcontacts = 11,
++	},
+ 	{ .name = MT_CLS_SIS,
+ 		.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ 			MT_QUIRK_ALWAYS_VALID |
+@@ -503,9 +513,6 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ 		if (!td->maxcontacts &&
+ 		    field->logical_maximum <= MT_MAX_MAXCONTACT)
+ 			td->maxcontacts = field->logical_maximum;
+-		if (td->mtclass.maxcontacts)
+-			/* check if the maxcontacts is given by the class */
+-			td->maxcontacts = td->mtclass.maxcontacts;
+ 
+ 		break;
+ 	case HID_DG_BUTTONTYPE:
+@@ -579,13 +586,13 @@ static struct mt_application *mt_allocate_application(struct mt_device *td,
+ 	mt_application->application = application;
+ 	INIT_LIST_HEAD(&mt_application->mt_usages);
+ 
+-	if (application == HID_DG_TOUCHSCREEN)
++	if (application == HID_DG_TOUCHSCREEN && !td->mtclass.is_indirect)
+ 		mt_application->mt_flags |= INPUT_MT_DIRECT;
+ 
+ 	/*
+ 	 * Model touchscreens providing buttons as touchpads.
+ 	 */
+-	if (application == HID_DG_TOUCHPAD) {
++	if (application == HID_DG_TOUCHPAD && !td->mtclass.is_direct) {
+ 		mt_application->mt_flags |= INPUT_MT_POINTER;
+ 		td->inputmode_value = MT_INPUTMODE_TOUCHPAD;
+ 	}
+@@ -649,7 +656,9 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ 
+ 	if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+ 		for (n = 0; n < field->report_count; n++) {
+-			if (field->usage[n].hid == HID_DG_CONTACTID) {
++			unsigned int hid = field->usage[n].hid;
++
++			if (hid == HID_DG_CONTACTID || hid == HID_DG_TRANSDUCER_INDEX) {
+ 				rdata->is_mt_collection = true;
+ 				break;
+ 			}
+@@ -821,6 +830,15 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 
+ 			MT_STORE_FIELD(confidence_state);
+ 			return 1;
++		case HID_DG_TOUCH:
++			/*
++			 * Legacy devices use TIPSWITCH and not TOUCH.
++			 * Let's just ignore this field unless the quirk is set.
++			 */
++			if (!(cls->quirks & MT_QUIRK_TOUCH_IS_TIPSTATE))
++				return -1;
++
++			fallthrough;
+ 		case HID_DG_TIPSWITCH:
+ 			if (field->application != HID_GD_SYSTEM_MULTIAXIS)
+ 				input_set_capability(hi->input,
+@@ -828,6 +846,7 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 			MT_STORE_FIELD(tip_state);
+ 			return 1;
+ 		case HID_DG_CONTACTID:
++		case HID_DG_TRANSDUCER_INDEX:
+ 			MT_STORE_FIELD(contactid);
+ 			app->touches_by_report++;
+ 			return 1;
+@@ -883,10 +902,6 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ 		case HID_DG_CONTACTMAX:
+ 			/* contact max are global to the report */
+ 			return -1;
+-		case HID_DG_TOUCH:
+-			/* Legacy devices use TIPSWITCH and not TOUCH.
+-			 * Let's just ignore this field. */
+-			return -1;
+ 	}
+ 	/* let hid-input decide for the others */
+ 	return 0;
+@@ -1314,6 +1329,10 @@ static int mt_touch_input_configured(struct hid_device *hdev,
+ 	struct input_dev *input = hi->input;
+ 	int ret;
+ 
++	/* check if the maxcontacts is given by the class */
++	if (cls->maxcontacts)
++		td->maxcontacts = cls->maxcontacts;
++
+ 	if (!td->maxcontacts)
+ 		td->maxcontacts = MT_DEFAULT_MAXCONTACT;
+ 
+@@ -1321,6 +1340,9 @@ static int mt_touch_input_configured(struct hid_device *hdev,
+ 	if (td->serial_maybe)
+ 		mt_post_parse_default_settings(td, app);
+ 
++	if (cls->is_direct)
++		app->mt_flags |= INPUT_MT_DIRECT;
++
+ 	if (cls->is_indirect)
+ 		app->mt_flags |= INPUT_MT_POINTER;
+ 
+@@ -1770,6 +1792,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 		}
+ 	}
+ 
++	ret = hid_parse(hdev);
++	if (ret != 0)
++		return ret;
++
++	if (mtclass->name == MT_CLS_APPLE_TOUCHBAR &&
++	    !hid_find_field(hdev, HID_INPUT_REPORT,
++			    HID_DG_TOUCHPAD, HID_DG_TRANSDUCER_INDEX))
++		return -ENODEV;
++
+ 	td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
+ 	if (!td) {
+ 		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
+@@ -1817,10 +1848,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ 
+ 	timer_setup(&td->release_timer, mt_expired_timeout, 0);
+ 
+-	ret = hid_parse(hdev);
+-	if (ret != 0)
+-		return ret;
+-
+ 	if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
+ 		mt_fix_const_fields(hdev, HID_DG_CONTACTID);
+ 
+@@ -2305,6 +2332,11 @@ static const struct hid_device_id mt_devices[] = {
+ 		MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ 			USB_DEVICE_ID_XIROKU_CSR2) },
+ 
++	/* Apple Touch Bars */
++	{ .driver_data = MT_CLS_APPLE_TOUCHBAR,
++		HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
++			USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
++
+ 	/* Google MT devices */
+ 	{ .driver_data = MT_CLS_GOOGLE,
+ 		HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
+index e0bbf0c6345d..7c576d6540fe 100644
+--- a/drivers/hid/hid-quirks.c
++++ b/drivers/hid/hid-quirks.c
+@@ -328,8 +328,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+-	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
+ #endif
+ #if IS_ENABLED(CONFIG_HID_APPLEIR)
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
+@@ -338,6 +336,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
+ #endif
++#if IS_ENABLED(CONFIG_HID_APPLETB_BL)
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
++#endif
++#if IS_ENABLED(CONFIG_HID_APPLETB_KBD)
++	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
++#endif
+ #if IS_ENABLED(CONFIG_HID_ASUS)
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
+ 	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
 diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
-index ea13ea482a63..1c086d79da1c 100644
+index dd376602f3f1..825966b9d109 100644
 --- a/drivers/hwmon/Kconfig
 +++ b/drivers/hwmon/Kconfig
 @@ -2075,6 +2075,17 @@ config SENSORS_SCH5636
@@ -8365,4021 +11592,6170 @@
 index b827b92f2a78..b13f2b293838 100644
  obj-$(CONFIG_SENSORS_SPD5118)	+= spd5118.o
  obj-$(CONFIG_SENSORS_STTS751)	+= stts751.o
  obj-$(CONFIG_SENSORS_SURFACE_FAN)+= surface_fan.o
+diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
+index fc6d6a9053ce..698f44794453 100644
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -6,6 +6,7 @@
+  *
+  * Copyright (C) 2007 Nicolas Boichat
+  * Copyright (C) 2010 Henrik Rydberg
++ * Copyright (C) 2019 Paul Pawlowski
+  *
+  * Based on hdaps.c driver:
+  * Copyright (C) 2005 Robert Love
+@@ -18,7 +19,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
+ #include <linux/delay.h>
+-#include <linux/platform_device.h>
++#include <linux/acpi.h>
+ #include <linux/input.h>
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+@@ -35,12 +36,24 @@
+ #include <linux/bits.h>
+ 
+ /* data port used by Apple SMC */
+-#define APPLESMC_DATA_PORT	0x300
++#define APPLESMC_DATA_PORT	0
+ /* command/status port used by Apple SMC */
+-#define APPLESMC_CMD_PORT	0x304
++#define APPLESMC_CMD_PORT	4
+ 
+ #define APPLESMC_NR_PORTS	32 /* 0x300-0x31f */
+ 
++#define APPLESMC_IOMEM_KEY_DATA	0
++#define APPLESMC_IOMEM_KEY_STATUS	0x4005
++#define APPLESMC_IOMEM_KEY_NAME	0x78
++#define APPLESMC_IOMEM_KEY_DATA_LEN	0x7D
++#define APPLESMC_IOMEM_KEY_SMC_ID	0x7E
++#define APPLESMC_IOMEM_KEY_CMD	0x7F
++#define APPLESMC_IOMEM_MIN_SIZE	0x4006
++
++#define APPLESMC_IOMEM_KEY_TYPE_CODE	0
++#define APPLESMC_IOMEM_KEY_TYPE_DATA_LEN	5
++#define APPLESMC_IOMEM_KEY_TYPE_FLAGS	6
++
+ #define APPLESMC_MAX_DATA_LENGTH 32
+ 
+ /* Apple SMC status bits */
+@@ -74,6 +87,7 @@
+ #define FAN_ID_FMT		"F%dID" /* r-o char[16] */
+ 
+ #define TEMP_SENSOR_TYPE	"sp78"
++#define FLOAT_TYPE		"flt "
+ 
+ /* List of keys used to read/write fan speeds */
+ static const char *const fan_speed_fmt[] = {
+@@ -83,6 +97,7 @@ static const char *const fan_speed_fmt[] = {
+ 	"F%dSf",		/* safe speed - not all models */
+ 	"F%dTg",		/* target speed (manual: rw) */
+ };
++#define FAN_MANUAL_FMT "F%dMd"
+ 
+ #define INIT_TIMEOUT_MSECS	5000	/* wait up to 5s for device init ... */
+ #define INIT_WAIT_MSECS	50	/* ... 
in 50ms increments */ +@@ -119,7 +134,7 @@ struct applesmc_entry { + }; + + /* Register lookup and registers common to all SMCs */ +-static struct applesmc_registers { ++struct applesmc_registers { + struct mutex mutex; /* register read/write mutex */ + unsigned int key_count; /* number of SMC registers */ + unsigned int fan_count; /* number of fans */ +@@ -133,26 +148,38 @@ static struct applesmc_registers { + bool init_complete; /* true when fully initialized */ + struct applesmc_entry *cache; /* cached key entries */ + const char **index; /* temperature key index */ +-} smcreg = { +- .mutex = __MUTEX_INITIALIZER(smcreg.mutex), + }; + +-static const int debug; +-static struct platform_device *pdev; +-static s16 rest_x; +-static s16 rest_y; +-static u8 backlight_state[2]; ++struct applesmc_device { ++ struct acpi_device *dev; ++ struct device *ldev; ++ struct applesmc_registers reg; + +-static struct device *hwmon_dev; +-static struct input_dev *applesmc_idev; ++ bool port_base_set, iomem_base_set; ++ u16 port_base; ++ u8 *__iomem iomem_base; ++ u32 iomem_base_addr, iomem_base_size; + +-/* +- * Last index written to key_at_index sysfs file, and value to use for all other +- * key_at_index_* sysfs files. +- */ +-static unsigned int key_at_index; ++ s16 rest_x; ++ s16 rest_y; + -+#define STEAMDECK_HWMON_NAME "steamdeck-hwmon" ++ u8 backlight_state[2]; + -+struct steamdeck_hwmon { -+ struct acpi_device *adev; ++ struct device *hwmon_dev; ++ struct input_dev *idev; ++ ++ /* ++ * Last index written to key_at_index sysfs file, and value to use for all other ++ * key_at_index_* sysfs files. ++ */ ++ unsigned int key_at_index; + +-static struct workqueue_struct *applesmc_led_wq; ++ struct workqueue_struct *backlight_wq; ++ struct work_struct backlight_work; ++ struct led_classdev backlight_dev; +}; + -+static long -+steamdeck_hwmon_get(struct steamdeck_hwmon *sd, const char *method) -+{ -+ unsigned long long val; -+ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, -+ (char *)method, NULL, &val))) -+ return -EIO; ++static const int debug; + + /* + * Wait for specific status bits with a mask on the SMC. +@@ -162,7 +189,7 @@ static struct workqueue_struct *applesmc_led_wq; + * run out past 500ms. + */ + +-static int wait_status(u8 val, u8 mask) ++static int port_wait_status(struct applesmc_device *smc, u8 val, u8 mask) + { + u8 status; + int us; +@@ -170,7 +197,7 @@ static int wait_status(u8 val, u8 mask) + + us = APPLESMC_MIN_WAIT; + for (i = 0; i < 24 ; i++) { +- status = inb(APPLESMC_CMD_PORT); ++ status = inb(smc->port_base + APPLESMC_CMD_PORT); + if ((status & mask) == val) + return 0; + usleep_range(us, us * 2); +@@ -180,13 +207,13 @@ static int wait_status(u8 val, u8 mask) + return -EIO; + } + +-/* send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ ++/* port_send_byte - Write to SMC data port. Callers must hold applesmc_lock. */ + +-static int send_byte(u8 cmd, u16 port) ++static int port_send_byte(struct applesmc_device *smc, u8 cmd, u16 port) + { + int status; + +- status = wait_status(0, SMC_STATUS_IB_CLOSED); ++ status = port_wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (status) + return status; + /* +@@ -195,24 +222,25 @@ static int send_byte(u8 cmd, u16 port) + * this extra read may not happen if status returns both + * simultaneously and this would appear to be required. 
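+	 * (Concretely: port_send_byte() first waits for the input buffer to
+	 * drain, then polls until SMC_STATUS_BUSY is observed on its own
+	 * read before writing the byte to the data port.)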
+ */ +- status = wait_status(SMC_STATUS_BUSY, SMC_STATUS_BUSY); ++ status = port_wait_status(smc, SMC_STATUS_BUSY, SMC_STATUS_BUSY); + if (status) + return status; + +- outb(cmd, port); ++ outb(cmd, smc->port_base + port); + return 0; + } + +-/* send_command - Write a command to the SMC. Callers must hold applesmc_lock. */ ++/* port_send_command - Write a command to the SMC. Callers must hold applesmc_lock. */ + +-static int send_command(u8 cmd) ++static int port_send_command(struct applesmc_device *smc, u8 cmd) + { + int ret; + +- ret = wait_status(0, SMC_STATUS_IB_CLOSED); ++ ret = port_wait_status(smc, 0, SMC_STATUS_IB_CLOSED); + if (ret) + return ret; +- outb(cmd, APPLESMC_CMD_PORT); + -+ return val; ++ outb(cmd, smc->port_base + APPLESMC_CMD_PORT); + return 0; + } + +@@ -222,110 +250,304 @@ static int send_command(u8 cmd) + * If busy is stuck high after the command then the SMC is jammed. + */ + +-static int smc_sane(void) ++static int port_smc_sane(struct applesmc_device *smc) + { + int ret; + +- ret = wait_status(0, SMC_STATUS_BUSY); ++ ret = port_wait_status(smc, 0, SMC_STATUS_BUSY); + if (!ret) + return ret; +- ret = send_command(APPLESMC_READ_CMD); ++ ret = port_send_command(smc, APPLESMC_READ_CMD); + if (ret) + return ret; +- return wait_status(0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int send_argument(const char *key) ++static int port_send_argument(struct applesmc_device *smc, const char *key) + { + int i; + + for (i = 0; i < 4; i++) +- if (send_byte(key[i], APPLESMC_DATA_PORT)) ++ if (port_send_byte(smc, key[i], APPLESMC_DATA_PORT)) + return -EIO; + return 0; + } + +-static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len) ++static int port_read_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ u8 *buffer, u8 len) + { + u8 status, data = 0; + int i; + int ret; + +- ret = smc_sane(); ++ ret = port_smc_sane(smc); + if (ret) + return ret; + +- if (send_command(cmd) || send_argument(key)) { ++ if (port_send_command(smc, cmd) || port_send_argument(smc, key)) { + pr_warn("%.4s: read arg fail\n", key); + return -EIO; + } + + /* This has no effect on newer (2012) SMCs */ +- if (send_byte(len, APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: read len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (wait_status(SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, ++ if (port_wait_status(smc, ++ SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY, + SMC_STATUS_AWAITING_DATA | SMC_STATUS_BUSY)) { + pr_warn("%.4s: read data[%d] fail\n", key, i); + return -EIO; + } +- buffer[i] = inb(APPLESMC_DATA_PORT); ++ buffer[i] = inb(smc->port_base + APPLESMC_DATA_PORT); + } + + /* Read the data port until bit0 is cleared */ + for (i = 0; i < 16; i++) { + udelay(APPLESMC_MIN_WAIT); +- status = inb(APPLESMC_CMD_PORT); ++ status = inb(smc->port_base + APPLESMC_CMD_PORT); + if (!(status & SMC_STATUS_AWAITING_DATA)) + break; +- data = inb(APPLESMC_DATA_PORT); ++ data = inb(smc->port_base + APPLESMC_DATA_PORT); + } + if (i) + pr_warn("flushed %d bytes, last value is: %d\n", i, data); + +- return wait_status(0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int write_smc(u8 cmd, const char *key, const u8 *buffer, u8 len) ++static int port_write_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ const u8 *buffer, u8 len) + { + int i; + int ret; + +- ret = smc_sane(); ++ ret = port_smc_sane(smc); + if (ret) + return ret; + +- if 
(send_command(cmd) || send_argument(key)) { ++ if (port_send_command(smc, cmd) || port_send_argument(smc, key)) { + pr_warn("%s: write arg fail\n", key); + return -EIO; + } + +- if (send_byte(len, APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, len, APPLESMC_DATA_PORT)) { + pr_warn("%.4s: write len fail\n", key); + return -EIO; + } + + for (i = 0; i < len; i++) { +- if (send_byte(buffer[i], APPLESMC_DATA_PORT)) { ++ if (port_send_byte(smc, buffer[i], APPLESMC_DATA_PORT)) { + pr_warn("%s: write data fail\n", key); + return -EIO; + } + } + +- return wait_status(0, SMC_STATUS_BUSY); ++ return port_wait_status(smc, 0, SMC_STATUS_BUSY); + } + +-static int read_register_count(unsigned int *count) ++static int port_get_smc_key_info(struct applesmc_device *smc, ++ const char *key, struct applesmc_entry *info) + { +- __be32 be; + int ret; ++ u8 raw[6]; + +- ret = read_smc(APPLESMC_READ_CMD, KEY_COUNT_KEY, (u8 *)&be, 4); ++ ret = port_read_smc(smc, APPLESMC_GET_KEY_TYPE_CMD, key, raw, 6); + if (ret) + return ret; ++ info->len = raw[0]; ++ memcpy(info->type, &raw[1], 4); ++ info->flags = raw[5]; ++ return 0; +} + -+static int -+steamdeck_hwmon_read(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, long *out) -+{ -+ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); -+ -+ switch (type) { -+ case hwmon_curr: -+ if (attr != hwmon_curr_input) -+ return -EOPNOTSUPP; -+ -+ *out = steamdeck_hwmon_get(sd, "PDAM"); -+ if (*out < 0) -+ return *out; -+ break; -+ case hwmon_in: -+ if (attr != hwmon_in_input) -+ return -EOPNOTSUPP; -+ -+ *out = steamdeck_hwmon_get(sd, "PDVL"); -+ if (*out < 0) -+ return *out; -+ break; -+ case hwmon_temp: -+ if (attr != hwmon_temp_input) -+ return -EOPNOTSUPP; + -+ *out = steamdeck_hwmon_get(sd, "BATT"); -+ if (*out < 0) -+ return *out; -+ /* -+ * Assuming BATT returns deg C we need to mutiply it -+ * by 1000 to convert to mC -+ */ -+ *out *= 1000; -+ break; -+ case hwmon_fan: -+ switch (attr) { -+ case hwmon_fan_input: -+ *out = steamdeck_hwmon_get(sd, "FANR"); -+ if (*out < 0) -+ return *out; -+ break; -+ case hwmon_fan_target: -+ *out = steamdeck_hwmon_get(sd, "FSSR"); -+ if (*out < 0) -+ return *out; -+ break; -+ case hwmon_fan_fault: -+ *out = steamdeck_hwmon_get(sd, "FANC"); -+ if (*out < 0) -+ return *out; -+ /* -+ * FANC (Fan check): -+ * 0: Abnormal -+ * 1: Normal -+ */ -+ *out = !*out; -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } -+ break; -+ default: -+ return -EOPNOTSUPP; -+ } ++/* ++ * MMIO based communication. ++ * TODO: Use updated mechanism for cmd timeout/retry ++ */ + -+ return 0; ++static void iomem_clear_status(struct applesmc_device *smc) ++{ ++ if (ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS)) ++ iowrite8(0, smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); +} + -+static int -+steamdeck_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, const char **str) ++static int iomem_wait_read(struct applesmc_device *smc) +{ -+ switch (type) { -+ /* -+ * These two aren't, strictly speaking, measured. EC -+ * firmware just reports what PD negotiation resulted -+ * in. 
-+ */ -+ case hwmon_curr: -+ *str = "PD Contract Current"; -+ break; -+ case hwmon_in: -+ *str = "PD Contract Voltage"; -+ break; -+ case hwmon_temp: -+ *str = "Battery Temp"; -+ break; -+ case hwmon_fan: -+ *str = "System Fan"; -+ break; -+ default: -+ return -EOPNOTSUPP; ++ u8 status; ++ int us; ++ int i; ++ ++ us = APPLESMC_MIN_WAIT; ++ for (i = 0; i < 24 ; i++) { ++ status = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); ++ if (status & 0x20) ++ return 0; ++ usleep_range(us, us * 2); ++ if (i > 9) ++ us <<= 1; + } + -+ return 0; ++ dev_warn(smc->ldev, "%s... timeout\n", __func__); ++ return -EIO; +} + -+static int -+steamdeck_hwmon_write(struct device *dev, enum hwmon_sensor_types type, -+ u32 attr, int channel, long val) ++static int iomem_read_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ u8 *buffer, u8 len) +{ -+ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); ++ u8 err, remote_len; ++ u32 key_int = *((u32 *) key); + -+ if (type != hwmon_fan || -+ attr != hwmon_fan_target) -+ return -EOPNOTSUPP; ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); + -+ val = clamp_val(val, 0, 7300); ++ if (iomem_wait_read(smc)) ++ return -EIO; + -+ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, -+ "FANS", val))) ++ err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "read_smc_mmio(%x %8x/%.4s) failed: %u\n", ++ cmd, key_int, key, err); + return -EIO; ++ } ++ ++ if (cmd == APPLESMC_READ_CMD) { ++ remote_len = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_DATA_LEN); ++ if (remote_len != len) { ++ dev_warn(smc->ldev, ++ "read_smc_mmio(%x %8x/%.4s) failed: buffer length mismatch (remote = %u, requested = %u)\n", ++ cmd, key_int, key, remote_len, len); ++ return -EINVAL; ++ } ++ } else { ++ remote_len = len; ++ } + ++ memcpy_fromio(buffer, smc->iomem_base + APPLESMC_IOMEM_KEY_DATA, ++ remote_len); ++ ++ dev_dbg(smc->ldev, "read_smc_mmio(%x %8x/%.4s): buflen=%u reslen=%u\n", ++ cmd, key_int, key, len, remote_len); ++ print_hex_dump_bytes("read_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, remote_len); + return 0; +} + -+static umode_t -+steamdeck_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, -+ u32 attr, int channel) ++static int iomem_get_smc_key_type(struct applesmc_device *smc, const char *key, ++ struct applesmc_entry *e) +{ -+ if (type == hwmon_fan && -+ attr == hwmon_fan_target) -+ return 0644; ++ u8 err; ++ u8 cmd = APPLESMC_GET_KEY_TYPE_CMD; ++ u32 key_int = *((u32 *) key); + -+ return 0444; -+} ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); + -+static const struct hwmon_channel_info *steamdeck_hwmon_info[] = { -+ HWMON_CHANNEL_INFO(in, -+ HWMON_I_INPUT | HWMON_I_LABEL), -+ HWMON_CHANNEL_INFO(curr, -+ HWMON_C_INPUT | HWMON_C_LABEL), -+ HWMON_CHANNEL_INFO(temp, -+ HWMON_T_INPUT | HWMON_T_LABEL), -+ HWMON_CHANNEL_INFO(fan, -+ HWMON_F_INPUT | HWMON_F_LABEL | -+ HWMON_F_TARGET | HWMON_F_FAULT), -+ NULL -+}; ++ if (iomem_wait_read(smc)) ++ return -EIO; + -+static const struct hwmon_ops steamdeck_hwmon_ops = { -+ .is_visible = steamdeck_hwmon_is_visible, -+ .read = steamdeck_hwmon_read, -+ .read_string = steamdeck_hwmon_read_string, -+ .write = steamdeck_hwmon_write, -+}; ++ 
err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "get_smc_key_type_mmio(%.4s) failed: %u\n", key, err); ++ return -EIO; ++ } + -+static const struct hwmon_chip_info steamdeck_hwmon_chip_info = { -+ .ops = &steamdeck_hwmon_ops, -+ .info = steamdeck_hwmon_info, -+}; ++ e->len = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_DATA_LEN); ++ *((uint32_t *) e->type) = ioread32( ++ smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_CODE); ++ e->flags = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_TYPE_FLAGS); + ++ dev_dbg(smc->ldev, "get_smc_key_type_mmio(%.4s): len=%u type=%.4s flags=%x\n", ++ key, e->len, e->type, e->flags); ++ return 0; ++} + -+static ssize_t -+steamdeck_hwmon_simple_store(struct device *dev, const char *buf, size_t count, -+ const char *method, -+ unsigned long upper_limit) ++static int iomem_write_smc(struct applesmc_device *smc, u8 cmd, const char *key, ++ const u8 *buffer, u8 len) +{ -+ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); -+ unsigned long value; ++ u8 err; ++ u32 key_int = *((u32 *) key); + -+ if (kstrtoul(buf, 10, &value) || value >= upper_limit) -+ return -EINVAL; ++ iomem_clear_status(smc); ++ iowrite32(key_int, smc->iomem_base + APPLESMC_IOMEM_KEY_NAME); ++ memcpy_toio(smc->iomem_base + APPLESMC_IOMEM_KEY_DATA, buffer, len); ++ iowrite32(len, smc->iomem_base + APPLESMC_IOMEM_KEY_DATA_LEN); ++ iowrite32(0, smc->iomem_base + APPLESMC_IOMEM_KEY_SMC_ID); ++ iowrite32(cmd, smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); + -+ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, -+ (char *)method, value))) ++ if (iomem_wait_read(smc)) + return -EIO; + -+ return count; -+} -+ -+static ssize_t -+steamdeck_hwmon_simple_show(struct device *dev, char *buf, -+ const char *method) -+{ -+ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); -+ unsigned long value; -+ -+ value = steamdeck_hwmon_get(sd, method); -+ if (value < 0) -+ return value; ++ err = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_CMD); ++ if (err != 0) { ++ dev_warn(smc->ldev, "write_smc_mmio(%x %.4s) failed: %u\n", cmd, key, err); ++ print_hex_dump_bytes("write_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, len); ++ return -EIO; ++ } + -+ return sprintf(buf, "%ld\n", value); ++ dev_dbg(smc->ldev, "write_smc_mmio(%x %.4s): buflen=%u\n", cmd, key, len); ++ print_hex_dump_bytes("write_smc_mmio(): ", DUMP_PREFIX_NONE, buffer, len); ++ return 0; +} + -+#define STEAMDECK_HWMON_ATTR_RW(_name, _set_method, _get_method, \ -+ _upper_limit) \ -+ static ssize_t _name##_show(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+ { \ -+ return steamdeck_hwmon_simple_show(dev, buf, \ -+ _get_method); \ -+ } \ -+ static ssize_t _name##_store(struct device *dev, \ -+ struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+ { \ -+ return steamdeck_hwmon_simple_store(dev, buf, count, \ -+ _set_method, \ -+ _upper_limit); \ -+ } \ -+ static DEVICE_ATTR_RW(_name) -+ -+STEAMDECK_HWMON_ATTR_RW(max_battery_charge_level, "FCBL", "SFBL", 101); -+STEAMDECK_HWMON_ATTR_RW(max_battery_charge_rate, "CHGR", "GCHR", 101); -+ -+static struct attribute *steamdeck_hwmon_attributes[] = { -+ &dev_attr_max_battery_charge_level.attr, -+ &dev_attr_max_battery_charge_rate.attr, -+ NULL -+}; -+ -+static const struct attribute_group steamdeck_hwmon_group = { -+ .attrs = steamdeck_hwmon_attributes, -+}; -+ -+static const struct attribute_group *steamdeck_hwmon_groups[] = { -+ &steamdeck_hwmon_group, -+ NULL -+}; + -+static int steamdeck_hwmon_probe(struct 
platform_device *pdev) ++static int read_smc(struct applesmc_device *smc, const char *key, ++ u8 *buffer, u8 len) +{ -+ struct device *dev = &pdev->dev; -+ struct steamdeck_hwmon *sd; -+ struct device *hwmon; -+ -+ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); -+ if (!sd) -+ return -ENOMEM; -+ -+ sd->adev = ACPI_COMPANION(dev->parent); -+ hwmon = devm_hwmon_device_register_with_info(dev, -+ "steamdeck_hwmon", -+ sd, -+ &steamdeck_hwmon_chip_info, -+ steamdeck_hwmon_groups); -+ if (IS_ERR(hwmon)) { -+ dev_err(dev, "Failed to register HWMON device"); -+ return PTR_ERR(hwmon); -+ } ++ if (smc->iomem_base_set) ++ return iomem_read_smc(smc, APPLESMC_READ_CMD, key, buffer, len); ++ else ++ return port_read_smc(smc, APPLESMC_READ_CMD, key, buffer, len); ++} + -+ return 0; ++static int write_smc(struct applesmc_device *smc, const char *key, ++ const u8 *buffer, u8 len) ++{ ++ if (smc->iomem_base_set) ++ return iomem_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); ++ else ++ return port_write_smc(smc, APPLESMC_WRITE_CMD, key, buffer, len); +} + -+static const struct platform_device_id steamdeck_hwmon_id_table[] = { -+ { .name = STEAMDECK_HWMON_NAME }, -+ {} -+}; -+MODULE_DEVICE_TABLE(platform, steamdeck_hwmon_id_table); ++static int get_smc_key_by_index(struct applesmc_device *smc, ++ unsigned int index, char *key) ++{ ++ __be32 be; + -+static struct platform_driver steamdeck_hwmon_driver = { -+ .probe = steamdeck_hwmon_probe, -+ .driver = { -+ .name = STEAMDECK_HWMON_NAME, -+ }, -+ .id_table = steamdeck_hwmon_id_table, -+}; -+module_platform_driver(steamdeck_hwmon_driver); ++ be = cpu_to_be32(index); ++ if (smc->iomem_base_set) ++ return iomem_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, ++ (const char *) &be, (u8 *) key, 4); ++ else ++ return port_read_smc(smc, APPLESMC_GET_KEY_BY_INDEX_CMD, ++ (const char *) &be, (u8 *) key, 4); ++} + -+MODULE_AUTHOR("Andrey Smirnov "); -+MODULE_DESCRIPTION("Steam Deck EC sensors driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c -index b5cbb57ee5f6..a0f7fa1518c6 100644 ---- a/drivers/input/evdev.c -+++ b/drivers/input/evdev.c -@@ -46,6 +46,7 @@ struct evdev_client { - struct fasync_struct *fasync; - struct evdev *evdev; - struct list_head node; -+ struct rcu_head rcu; - enum input_clock_type clk_type; - bool revoked; - unsigned long *evmasks[EV_CNT]; -@@ -368,13 +369,22 @@ static void evdev_attach_client(struct evdev *evdev, - spin_unlock(&evdev->client_lock); - } - -+static void evdev_reclaim_client(struct rcu_head *rp) ++static int get_smc_key_info(struct applesmc_device *smc, const char *key, ++ struct applesmc_entry *info) +{ -+ struct evdev_client *client = container_of(rp, struct evdev_client, rcu); -+ unsigned int i; -+ for (i = 0; i < EV_CNT; ++i) -+ bitmap_free(client->evmasks[i]); -+ kvfree(client); ++ if (smc->iomem_base_set) ++ return iomem_get_smc_key_type(smc, key, info); ++ else ++ return port_get_smc_key_info(smc, key, info); +} + - static void evdev_detach_client(struct evdev *evdev, - struct evdev_client *client) ++static int read_register_count(struct applesmc_device *smc, ++ unsigned int *count) ++{ ++ __be32 be; ++ int ret; ++ ++ ret = read_smc(smc, KEY_COUNT_KEY, (u8 *)&be, 4); ++ if (ret < 0) ++ return ret; + + *count = be32_to_cpu(be); + return 0; +@@ -338,76 +560,73 @@ static int read_register_count(unsigned int *count) + * All functions below are concurrency safe - callers should NOT hold lock. 
+ */ + +-static int applesmc_read_entry(const struct applesmc_entry *entry, +- u8 *buf, u8 len) ++static int applesmc_read_entry(struct applesmc_device *smc, ++ const struct applesmc_entry *entry, u8 *buf, u8 len) { - spin_lock(&evdev->client_lock); - list_del_rcu(&client->node); - spin_unlock(&evdev->client_lock); -- synchronize_rcu(); -+ call_rcu(&client->rcu, evdev_reclaim_client); + int ret; + + if (entry->len != len) + return -EINVAL; +- mutex_lock(&smcreg.mutex); +- ret = read_smc(APPLESMC_READ_CMD, entry->key, buf, len); +- mutex_unlock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); ++ ret = read_smc(smc, entry->key, buf, len); ++ mutex_unlock(&smc->reg.mutex); + + return ret; } - static int evdev_open_device(struct evdev *evdev) -@@ -427,7 +437,6 @@ static int evdev_release(struct inode *inode, struct file *file) +-static int applesmc_write_entry(const struct applesmc_entry *entry, +- const u8 *buf, u8 len) ++static int applesmc_write_entry(struct applesmc_device *smc, ++ const struct applesmc_entry *entry, const u8 *buf, u8 len) { - struct evdev_client *client = file->private_data; - struct evdev *evdev = client->evdev; -- unsigned int i; + int ret; - mutex_lock(&evdev->mutex); + if (entry->len != len) + return -EINVAL; +- mutex_lock(&smcreg.mutex); +- ret = write_smc(APPLESMC_WRITE_CMD, entry->key, buf, len); +- mutex_unlock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); ++ ret = write_smc(smc, entry->key, buf, len); ++ mutex_unlock(&smc->reg.mutex); + return ret; + } -@@ -439,11 +448,6 @@ static int evdev_release(struct inode *inode, struct file *file) +-static const struct applesmc_entry *applesmc_get_entry_by_index(int index) ++static const struct applesmc_entry *applesmc_get_entry_by_index( ++ struct applesmc_device *smc, int index) + { +- struct applesmc_entry *cache = &smcreg.cache[index]; +- u8 key[4], info[6]; +- __be32 be; ++ struct applesmc_entry *cache = &smc->reg.cache[index]; ++ char key[4]; + int ret = 0; - evdev_detach_client(evdev, client); + if (cache->valid) + return cache; -- for (i = 0; i < EV_CNT; ++i) -- bitmap_free(client->evmasks[i]); -- -- kvfree(client); +- mutex_lock(&smcreg.mutex); ++ mutex_lock(&smc->reg.mutex); + + if (cache->valid) + goto out; +- be = cpu_to_be32(index); +- ret = read_smc(APPLESMC_GET_KEY_BY_INDEX_CMD, (u8 *)&be, key, 4); ++ ret = get_smc_key_by_index(smc, index, key); + if (ret) + goto out; +- ret = read_smc(APPLESMC_GET_KEY_TYPE_CMD, key, info, 6); ++ memcpy(cache->key, key, 4); ++ ++ ret = get_smc_key_info(smc, key, cache); + if (ret) + goto out; - - evdev_close_device(evdev); +- memcpy(cache->key, key, 4); +- cache->len = info[0]; +- memcpy(cache->type, &info[1], 4); +- cache->flags = info[5]; + cache->valid = true; + out: +- mutex_unlock(&smcreg.mutex); ++ mutex_unlock(&smc->reg.mutex); + if (ret) + return ERR_PTR(ret); + return cache; + } + +-static int applesmc_get_lower_bound(unsigned int *lo, const char *key) ++static int applesmc_get_lower_bound(struct applesmc_device *smc, ++ unsigned int *lo, const char *key) + { +- int begin = 0, end = smcreg.key_count; ++ int begin = 0, end = smc->reg.key_count; + const struct applesmc_entry *entry; + + while (begin != end) { + int middle = begin + (end - begin) / 2; +- entry = applesmc_get_entry_by_index(middle); ++ entry = applesmc_get_entry_by_index(smc, middle); + if (IS_ERR(entry)) { + *lo = 0; + return PTR_ERR(entry); +@@ -422,16 +641,17 @@ static int applesmc_get_lower_bound(unsigned int *lo, const char *key) return 0; -@@ -486,7 +490,6 @@ static int evdev_open(struct inode 
*inode, struct file *file) - - err_free_client: - evdev_detach_client(evdev, client); -- kvfree(client); - return error; } -diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig -index b784bb74a837..fb3d45acfd9d 100644 ---- a/drivers/leds/Kconfig -+++ b/drivers/leds/Kconfig -@@ -959,6 +959,13 @@ config LEDS_ACER_A500 - This option enables support for the Power Button LED of - Acer Iconia Tab A500. +-static int applesmc_get_upper_bound(unsigned int *hi, const char *key) ++static int applesmc_get_upper_bound(struct applesmc_device *smc, ++ unsigned int *hi, const char *key) + { +- int begin = 0, end = smcreg.key_count; ++ int begin = 0, end = smc->reg.key_count; + const struct applesmc_entry *entry; -+config LEDS_STEAMDECK -+ tristate "LED support for Steam Deck" -+ depends on LEDS_CLASS && MFD_STEAMDECK -+ help -+ This option enabled support for the status LED (next to the -+ power button) on Steam Deck -+ - source "drivers/leds/blink/Kconfig" + while (begin != end) { + int middle = begin + (end - begin) / 2; +- entry = applesmc_get_entry_by_index(middle); ++ entry = applesmc_get_entry_by_index(smc, middle); + if (IS_ERR(entry)) { +- *hi = smcreg.key_count; ++ *hi = smc->reg.key_count; + return PTR_ERR(entry); + } + if (strcmp(key, entry->key) < 0) +@@ -444,50 +664,54 @@ static int applesmc_get_upper_bound(unsigned int *hi, const char *key) + return 0; + } - comment "Flash and Torch LED drivers" -diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile -index 18afbb5a23ee..b0ff19d14419 100644 ---- a/drivers/leds/Makefile -+++ b/drivers/leds/Makefile -@@ -81,6 +81,7 @@ obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o - obj-$(CONFIG_LEDS_PWM) += leds-pwm.o - obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o - obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o -+obj-$(CONFIG_LEDS_STEAMDECK) += leds-steamdeck.o - obj-$(CONFIG_LEDS_SUN50I_A100) += leds-sun50i-a100.o - obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o - obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o -diff --git a/drivers/leds/leds-steamdeck.c b/drivers/leds/leds-steamdeck.c -new file mode 100644 -index 000000000000..56d31d2dd099 ---- /dev/null -+++ b/drivers/leds/leds-steamdeck.c -@@ -0,0 +1,74 @@ -+// SPDX-License-Identifier: GPL-2.0+ -+ -+/* -+ * Steam Deck EC MFD LED cell driver -+ * -+ * Copyright (C) 2021-2022 Valve Corporation -+ * +-static const struct applesmc_entry *applesmc_get_entry_by_key(const char *key) ++static const struct applesmc_entry *applesmc_get_entry_by_key( ++ struct applesmc_device *smc, const char *key) + { + int begin, end; + int ret; + +- ret = applesmc_get_lower_bound(&begin, key); ++ ret = applesmc_get_lower_bound(smc, &begin, key); + if (ret) + return ERR_PTR(ret); +- ret = applesmc_get_upper_bound(&end, key); ++ ret = applesmc_get_upper_bound(smc, &end, key); + if (ret) + return ERR_PTR(ret); + if (end - begin != 1) + return ERR_PTR(-EINVAL); + +- return applesmc_get_entry_by_index(begin); ++ return applesmc_get_entry_by_index(smc, begin); + } + +-static int applesmc_read_key(const char *key, u8 *buffer, u8 len) ++static int applesmc_read_key(struct applesmc_device *smc, ++ const char *key, u8 *buffer, u8 len) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +- return applesmc_read_entry(entry, buffer, len); ++ return applesmc_read_entry(smc, entry, buffer, len); + } + +-static int applesmc_write_key(const char *key, const u8 *buffer, u8 len) ++static int 
applesmc_write_key(struct applesmc_device *smc, ++ const char *key, const u8 *buffer, u8 len) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +- return applesmc_write_entry(entry, buffer, len); ++ return applesmc_write_entry(smc, entry, buffer, len); + } + +-static int applesmc_has_key(const char *key, bool *value) ++static int applesmc_has_key(struct applesmc_device *smc, ++ const char *key, bool *value) + { + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_key(key); ++ entry = applesmc_get_entry_by_key(smc, key); + if (IS_ERR(entry) && PTR_ERR(entry) != -EINVAL) + return PTR_ERR(entry); + +@@ -498,12 +722,13 @@ static int applesmc_has_key(const char *key, bool *value) + /* + * applesmc_read_s16 - Read 16-bit signed big endian register + */ +-static int applesmc_read_s16(const char *key, s16 *value) ++static int applesmc_read_s16(struct applesmc_device *smc, ++ const char *key, s16 *value) + { + u8 buffer[2]; + int ret; + +- ret = applesmc_read_key(key, buffer, 2); ++ ret = applesmc_read_key(smc, key, buffer, 2); + if (ret) + return ret; + +@@ -511,31 +736,68 @@ static int applesmc_read_s16(const char *key, s16 *value) + return 0; + } + ++/** ++ * applesmc_float_to_u32 - Retrieve the integral part of a float. ++ * This is needed because Apple made fans use float values in the T2. ++ * The fractional point is not significantly useful though, and the integral ++ * part can be easily extracted. + */ -+ -+#include -+#include -+#include -+ -+struct steamdeck_led { -+ struct acpi_device *adev; -+ struct led_classdev cdev; -+}; -+ -+static int steamdeck_leds_brightness_set(struct led_classdev *cdev, -+ enum led_brightness value) ++static inline u32 applesmc_float_to_u32(u32 d) +{ -+ struct steamdeck_led *sd = container_of(cdev, struct steamdeck_led, -+ cdev); ++ u8 sign = (u8) ((d >> 31) & 1); ++ s32 exp = (s32) ((d >> 23) & 0xff) - 0x7f; ++ u32 fr = d & ((1u << 23) - 1); + -+ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, -+ "CHBV", value))) -+ return -EIO; ++ if (sign || exp < 0) ++ return 0; + -+ return 0; ++ return (u32) ((1u << exp) + (fr >> (23 - exp))); +} + -+static int steamdeck_leds_probe(struct platform_device *pdev) ++/** ++ * applesmc_u32_to_float - Convert an u32 into a float. ++ * See applesmc_float_to_u32 for a rationale. 
++ */ ++static inline u32 applesmc_u32_to_float(u32 d) +{ -+ struct device *dev = &pdev->dev; -+ struct steamdeck_led *sd; -+ int ret; -+ -+ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); -+ if (!sd) -+ return -ENOMEM; -+ -+ sd->adev = ACPI_COMPANION(dev->parent); ++ u32 dc = d, bc = 0, exp; + -+ sd->cdev.name = "status:white"; -+ sd->cdev.brightness_set_blocking = steamdeck_leds_brightness_set; -+ sd->cdev.max_brightness = 100; ++ if (!d) ++ return 0; + -+ ret = devm_led_classdev_register(dev, &sd->cdev); -+ if (ret) { -+ dev_err(dev, "Failed to register LEDs device: %d\n", ret); -+ return ret; -+ } ++ while (dc >>= 1) ++ ++bc; ++ exp = 0x7f + bc; + -+ return 0; ++ return (u32) ((exp << 23) | ++ ((d << (23 - (exp - 0x7f))) & ((1u << 23) - 1))); +} -+ -+static const struct platform_device_id steamdeck_leds_id_table[] = { -+ { .name = "steamdeck-leds" }, -+ {} -+}; -+MODULE_DEVICE_TABLE(platform, steamdeck_leds_id_table); -+ -+static struct platform_driver steamdeck_leds_driver = { -+ .probe = steamdeck_leds_probe, -+ .driver = { -+ .name = "steamdeck-leds", -+ }, -+ .id_table = steamdeck_leds_id_table, -+}; -+module_platform_driver(steamdeck_leds_driver); -+ -+MODULE_AUTHOR("Andrey Smirnov "); -+MODULE_DESCRIPTION("Steam Deck LEDs driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c -index 1ae2c71bb383..784829ada178 100644 ---- a/drivers/md/dm-crypt.c -+++ b/drivers/md/dm-crypt.c -@@ -3315,6 +3315,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) - goto bad; + /* + * applesmc_device_init - initialize the accelerometer. Can sleep. + */ +-static void applesmc_device_init(void) ++static void applesmc_device_init(struct applesmc_device *smc) + { + int total; + u8 buffer[2]; + +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return; + + for (total = INIT_TIMEOUT_MSECS; total > 0; total -= INIT_WAIT_MSECS) { +- if (!applesmc_read_key(MOTION_SENSOR_KEY, buffer, 2) && ++ if (!applesmc_read_key(smc, MOTION_SENSOR_KEY, buffer, 2) && + (buffer[0] != 0x00 || buffer[1] != 0x00)) + return; + buffer[0] = 0xe0; + buffer[1] = 0x00; +- applesmc_write_key(MOTION_SENSOR_KEY, buffer, 2); ++ applesmc_write_key(smc, MOTION_SENSOR_KEY, buffer, 2); + msleep(INIT_WAIT_MSECS); } -+#ifdef CONFIG_CACHY -+ set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); -+ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); -+#endif -+ - ret = crypt_ctr_cipher(ti, argv[0], argv[1]); - if (ret < 0) - goto bad; -diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig -index 331b8e535e5b..80dabeebf580 100644 ---- a/drivers/media/v4l2-core/Kconfig -+++ b/drivers/media/v4l2-core/Kconfig -@@ -40,6 +40,11 @@ config VIDEO_TUNER - config V4L2_JPEG_HELPER - tristate + pr_warn("failed to init the device\n"); + } -+config V4L2_LOOPBACK -+ tristate "V4L2 loopback device" -+ help -+ V4L2 loopback device -+ - # Used by drivers that need v4l2-h264.ko - config V4L2_H264 - tristate -diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile -index 2177b9d63a8f..c179507cedc4 100644 ---- a/drivers/media/v4l2-core/Makefile -+++ b/drivers/media/v4l2-core/Makefile -@@ -33,5 +33,7 @@ obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o - obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o - obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o +-static int applesmc_init_index(struct applesmc_registers *s) ++static int applesmc_init_index(struct applesmc_device *smc, ++ struct applesmc_registers *s) + { + const struct applesmc_entry *entry; + 
unsigned int i; +@@ -548,7 +810,7 @@ static int applesmc_init_index(struct applesmc_registers *s) + return -ENOMEM; -+obj-$(CONFIG_V4L2_LOOPBACK) += v4l2loopback.o -+ - obj-$(CONFIG_VIDEO_TUNER) += tuner.o - obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o -diff --git a/drivers/media/v4l2-core/v4l2loopback.c b/drivers/media/v4l2-core/v4l2loopback.c -new file mode 100644 -index 000000000000..25cb1beb26e5 ---- /dev/null -+++ b/drivers/media/v4l2-core/v4l2loopback.c -@@ -0,0 +1,3184 @@ -+/* -*- c-file-style: "linux" -*- */ -+/* -+ * v4l2loopback.c -- video4linux2 loopback driver -+ * -+ * Copyright (C) 2005-2009 Vasily Levin (vasaka@gmail.com) -+ * Copyright (C) 2010-2023 IOhannes m zmoelnig (zmoelnig@iem.at) -+ * Copyright (C) 2011 Stefan Diewald (stefan.diewald@mytum.de) -+ * Copyright (C) 2012 Anton Novikov (random.plant@gmail.com) -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include + for (i = s->temp_begin; i < s->temp_end; i++) { +- entry = applesmc_get_entry_by_index(i); ++ entry = applesmc_get_entry_by_index(smc, i); + if (IS_ERR(entry)) + continue; + if (strcmp(entry->type, TEMP_SENSOR_TYPE)) +@@ -562,9 +824,9 @@ static int applesmc_init_index(struct applesmc_registers *s) + /* + * applesmc_init_smcreg_try - Try to initialize register cache. Idempotent. + */ +-static int applesmc_init_smcreg_try(void) ++static int applesmc_init_smcreg_try(struct applesmc_device *smc) + { +- struct applesmc_registers *s = &smcreg; ++ struct applesmc_registers *s = &smc->reg; + bool left_light_sensor = false, right_light_sensor = false; + unsigned int count; + u8 tmp[1]; +@@ -573,7 +835,7 @@ static int applesmc_init_smcreg_try(void) + if (s->init_complete) + return 0; + +- ret = read_register_count(&count); ++ ret = read_register_count(smc, &count); + if (ret) + return ret; + +@@ -590,35 +852,35 @@ static int applesmc_init_smcreg_try(void) + if (!s->cache) + return -ENOMEM; + +- ret = applesmc_read_key(FANS_COUNT, tmp, 1); ++ ret = applesmc_read_key(smc, FANS_COUNT, tmp, 1); + if (ret) + return ret; + s->fan_count = tmp[0]; + if (s->fan_count > 10) + s->fan_count = 10; + +- ret = applesmc_get_lower_bound(&s->temp_begin, "T"); ++ ret = applesmc_get_lower_bound(smc, &s->temp_begin, "T"); + if (ret) + return ret; +- ret = applesmc_get_lower_bound(&s->temp_end, "U"); ++ ret = applesmc_get_lower_bound(smc, &s->temp_end, "U"); + if (ret) + return ret; + s->temp_count = s->temp_end - s->temp_begin; + +- ret = applesmc_init_index(s); ++ ret = applesmc_init_index(smc, s); + if (ret) + return ret; + +- ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); ++ ret = applesmc_has_key(smc, LIGHT_SENSOR_LEFT_KEY, &left_light_sensor); + if (ret) + return ret; +- ret = applesmc_has_key(LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); ++ ret = applesmc_has_key(smc, LIGHT_SENSOR_RIGHT_KEY, &right_light_sensor); + if (ret) + return ret; +- ret = applesmc_has_key(MOTION_SENSOR_KEY, &s->has_accelerometer); ++ ret = applesmc_has_key(smc, MOTION_SENSOR_KEY, &s->has_accelerometer); + if (ret) + return ret; +- ret = applesmc_has_key(BACKLIGHT_KEY, &s->has_key_backlight); ++ ret = applesmc_has_key(smc, BACKLIGHT_KEY, 
&s->has_key_backlight); + if (ret) + return ret; + +@@ -634,13 +896,13 @@ static int applesmc_init_smcreg_try(void) + return 0; + } + +-static void applesmc_destroy_smcreg(void) ++static void applesmc_destroy_smcreg(struct applesmc_device *smc) + { +- kfree(smcreg.index); +- smcreg.index = NULL; +- kfree(smcreg.cache); +- smcreg.cache = NULL; +- smcreg.init_complete = false; ++ kfree(smc->reg.index); ++ smc->reg.index = NULL; ++ kfree(smc->reg.cache); ++ smc->reg.cache = NULL; ++ smc->reg.init_complete = false; + } + + /* +@@ -649,12 +911,12 @@ static void applesmc_destroy_smcreg(void) + * Retries until initialization is successful, or the operation times out. + * + */ +-static int applesmc_init_smcreg(void) ++static int applesmc_init_smcreg(struct applesmc_device *smc) + { + int ms, ret; + + for (ms = 0; ms < INIT_TIMEOUT_MSECS; ms += INIT_WAIT_MSECS) { +- ret = applesmc_init_smcreg_try(); ++ ret = applesmc_init_smcreg_try(smc); + if (!ret) { + if (ms) + pr_info("init_smcreg() took %d ms\n", ms); +@@ -663,50 +925,223 @@ static int applesmc_init_smcreg(void) + msleep(INIT_WAIT_MSECS); + } + +- applesmc_destroy_smcreg(); ++ applesmc_destroy_smcreg(smc); + + return ret; + } + + /* Device model stuff */ +-static int applesmc_probe(struct platform_device *dev) + -+#include -+#include "v4l2loopback.h" ++static int applesmc_init_resources(struct applesmc_device *smc); ++static void applesmc_free_resources(struct applesmc_device *smc); ++static int applesmc_create_modules(struct applesmc_device *smc); ++static void applesmc_destroy_modules(struct applesmc_device *smc); + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) -+#error This module is not supported on kernels before 4.0.0. -+#endif ++static int applesmc_add(struct acpi_device *dev) + { ++ struct applesmc_device *smc; + int ret; + +- ret = applesmc_init_smcreg(); ++ smc = kzalloc(sizeof(struct applesmc_device), GFP_KERNEL); ++ if (!smc) ++ return -ENOMEM; ++ smc->dev = dev; ++ smc->ldev = &dev->dev; ++ mutex_init(&smc->reg.mutex); + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0) -+#define strscpy strlcpy -+#endif ++ dev_set_drvdata(&dev->dev, smc); + -+#if defined(timer_setup) && defined(from_timer) -+#define HAVE_TIMER_SETUP -+#endif ++ ret = applesmc_init_resources(smc); + if (ret) +- return ret; ++ goto out_mem; + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0) -+#define VFL_TYPE_VIDEO VFL_TYPE_GRABBER -+#endif ++ ret = applesmc_init_smcreg(smc); ++ if (ret) ++ goto out_res; + -+#define V4L2LOOPBACK_VERSION_CODE \ -+ KERNEL_VERSION(V4L2LOOPBACK_VERSION_MAJOR, V4L2LOOPBACK_VERSION_MINOR, \ -+ V4L2LOOPBACK_VERSION_BUGFIX) ++ applesmc_device_init(smc); + -+MODULE_DESCRIPTION("V4L2 loopback video device"); -+MODULE_AUTHOR("Vasily Levin, " -+ "IOhannes m zmoelnig ," -+ "Stefan Diewald," -+ "Anton Novikov" -+ "et al."); -+#ifdef SNAPSHOT_VERSION -+MODULE_VERSION(__stringify(SNAPSHOT_VERSION)); -+#else -+MODULE_VERSION("" __stringify(V4L2LOOPBACK_VERSION_MAJOR) "." __stringify( -+ V4L2LOOPBACK_VERSION_MINOR) "." __stringify(V4L2LOOPBACK_VERSION_BUGFIX)); -+#endif -+MODULE_LICENSE("GPL"); ++ ret = applesmc_create_modules(smc); ++ if (ret) ++ goto out_reg; + -+/* -+ * helpers -+ */ -+#define dprintk(fmt, args...) 
\ -+ do { \ -+ if (debug > 0) { \ -+ printk(KERN_INFO "v4l2-loopback[" __stringify( \ -+ __LINE__) "], pid(%d): " fmt, \ -+ task_pid_nr(current), ##args); \ -+ } \ -+ } while (0) ++ return 0; + -+#define MARK() \ -+ do { \ -+ if (debug > 1) { \ -+ printk(KERN_INFO "%s:%d[%s], pid(%d)\n", __FILE__, \ -+ __LINE__, __func__, task_pid_nr(current)); \ -+ } \ -+ } while (0) ++out_reg: ++ applesmc_destroy_smcreg(smc); ++out_res: ++ applesmc_free_resources(smc); ++out_mem: ++ dev_set_drvdata(&dev->dev, NULL); ++ mutex_destroy(&smc->reg.mutex); ++ kfree(smc); + -+#define dprintkrw(fmt, args...) \ -+ do { \ -+ if (debug > 2) { \ -+ printk(KERN_INFO "v4l2-loopback[" __stringify( \ -+ __LINE__) "], pid(%d): " fmt, \ -+ task_pid_nr(current), ##args); \ -+ } \ -+ } while (0) ++ return ret; ++} + -+static inline void v4l2l_get_timestamp(struct v4l2_buffer *b) ++static void applesmc_remove(struct acpi_device *dev) +{ -+ struct timespec64 ts; -+ ktime_get_ts64(&ts); ++ struct applesmc_device *smc = dev_get_drvdata(&dev->dev); + -+ b->timestamp.tv_sec = ts.tv_sec; -+ b->timestamp.tv_usec = (ts.tv_nsec / NSEC_PER_USEC); -+ b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; ++ applesmc_destroy_modules(smc); ++ applesmc_destroy_smcreg(smc); ++ applesmc_free_resources(smc); + +- applesmc_device_init(); ++ mutex_destroy(&smc->reg.mutex); ++ kfree(smc); ++ ++ return; +} + -+#if BITS_PER_LONG == 32 -+#include /* do_div() for 64bit division */ -+static inline int v4l2l_mod64(const s64 A, const u32 B) ++static acpi_status applesmc_walk_resources(struct acpi_resource *res, ++ void *data) +{ -+ u64 a = (u64)A; -+ u32 b = B; ++ struct applesmc_device *smc = data; + -+ if (A > 0) -+ return do_div(a, b); -+ a = -A; -+ return -do_div(a, b); -+} -+#else -+static inline int v4l2l_mod64(const s64 A, const u32 B) -+{ -+ return A % B; -+} -+#endif ++ switch (res->type) { ++ case ACPI_RESOURCE_TYPE_IO: ++ if (!smc->port_base_set) { ++ if (res->data.io.address_length < APPLESMC_NR_PORTS) ++ return AE_ERROR; ++ smc->port_base = res->data.io.minimum; ++ smc->port_base_set = true; ++ } ++ return AE_OK; ++ ++ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: ++ if (!smc->iomem_base_set) { ++ if (res->data.fixed_memory32.address_length < ++ APPLESMC_IOMEM_MIN_SIZE) { ++ dev_warn(smc->ldev, "found iomem but it's too small: %u\n", ++ res->data.fixed_memory32.address_length); ++ return AE_OK; ++ } ++ smc->iomem_base_addr = res->data.fixed_memory32.address; ++ smc->iomem_base_size = res->data.fixed_memory32.address_length; ++ smc->iomem_base_set = true; ++ } ++ return AE_OK; + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) -+typedef unsigned __poll_t; -+#endif ++ case ACPI_RESOURCE_TYPE_END_TAG: ++ if (smc->port_base_set) ++ return AE_OK; ++ else ++ return AE_NOT_FOUND; + -+/* module constants -+ * can be overridden during he build process using something like -+ * make KCPPFLAGS="-DMAX_DEVICES=100" -+ */ ++ default: ++ return AE_OK; ++ } ++} + -+/* maximum number of v4l2loopback devices that can be created */ -+#ifndef MAX_DEVICES -+#define MAX_DEVICES 8 -+#endif ++static int applesmc_try_enable_iomem(struct applesmc_device *smc); + -+/* whether the default is to announce capabilities exclusively or not */ -+#ifndef V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS -+#define V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS 0 -+#endif ++static int applesmc_init_resources(struct applesmc_device *smc) ++{ ++ int ret; + -+/* when a producer is considered to have gone stale */ -+#ifndef MAX_TIMEOUT -+#define MAX_TIMEOUT (100 * 1000) /* in msecs */ -+#endif ++ ret = 
acpi_walk_resources(smc->dev->handle, METHOD_NAME__CRS, ++ applesmc_walk_resources, smc); ++ if (ACPI_FAILURE(ret)) ++ return -ENXIO; + -+/* max buffers that can be mapped, actually they -+ * are all mapped to max_buffers buffers */ -+#ifndef MAX_BUFFERS -+#define MAX_BUFFERS 32 -+#endif ++ if (!request_region(smc->port_base, APPLESMC_NR_PORTS, "applesmc")) ++ return -ENXIO; + -+/* module parameters */ -+static int debug = 0; -+module_param(debug, int, S_IRUGO | S_IWUSR); -+MODULE_PARM_DESC(debug, "debugging level (higher values == more verbose)"); ++ if (smc->iomem_base_set) { ++ if (applesmc_try_enable_iomem(smc)) ++ smc->iomem_base_set = false; ++ } + -+#define V4L2LOOPBACK_DEFAULT_MAX_BUFFERS 2 -+static int max_buffers = V4L2LOOPBACK_DEFAULT_MAX_BUFFERS; -+module_param(max_buffers, int, S_IRUGO); -+MODULE_PARM_DESC(max_buffers, -+ "how many buffers should be allocated [DEFAULT: " __stringify( -+ V4L2LOOPBACK_DEFAULT_MAX_BUFFERS) "]"); ++ return 0; ++} + -+/* how many times a device can be opened -+ * the per-module default value can be overridden on a per-device basis using -+ * the /sys/devices interface -+ * -+ * note that max_openers should be at least 2 in order to get a working system: -+ * one opener for the producer and one opener for the consumer -+ * however, we leave that to the user -+ */ -+#define V4L2LOOPBACK_DEFAULT_MAX_OPENERS 10 -+static int max_openers = V4L2LOOPBACK_DEFAULT_MAX_OPENERS; -+module_param(max_openers, int, S_IRUGO | S_IWUSR); -+MODULE_PARM_DESC( -+ max_openers, -+ "how many users can open the loopback device [DEFAULT: " __stringify( -+ V4L2LOOPBACK_DEFAULT_MAX_OPENERS) "]"); ++static int applesmc_try_enable_iomem(struct applesmc_device *smc) ++{ ++ u8 test_val, ldkn_version; + -+static int devices = -1; -+module_param(devices, int, 0); -+MODULE_PARM_DESC(devices, "how many devices should be created"); ++ dev_dbg(smc->ldev, "Trying to enable iomem based communication\n"); ++ smc->iomem_base = ioremap(smc->iomem_base_addr, smc->iomem_base_size); ++ if (!smc->iomem_base) ++ goto out; + -+static int video_nr[MAX_DEVICES] = { [0 ...(MAX_DEVICES - 1)] = -1 }; -+module_param_array(video_nr, int, NULL, 0444); -+MODULE_PARM_DESC(video_nr, -+ "video device numbers (-1=auto, 0=/dev/video0, etc.)"); ++ /* Apple's driver does this check for some reason */ ++ test_val = ioread8(smc->iomem_base + APPLESMC_IOMEM_KEY_STATUS); ++ if (test_val == 0xff) { ++ dev_warn(smc->ldev, ++ "iomem enable failed: initial status is 0xff (is %x)\n", ++ test_val); ++ goto out_iomem; ++ } + -+static char *card_label[MAX_DEVICES]; -+module_param_array(card_label, charp, NULL, 0000); -+MODULE_PARM_DESC(card_label, "card labels for each device"); ++ if (read_smc(smc, "LDKN", &ldkn_version, 1)) { ++ dev_warn(smc->ldev, "iomem enable failed: ldkn read failed\n"); ++ goto out_iomem; ++ } + -+static bool exclusive_caps[MAX_DEVICES] = { -+ [0 ...(MAX_DEVICES - 1)] = V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS -+}; -+module_param_array(exclusive_caps, bool, NULL, 0444); -+/* FIXXME: wording */ -+MODULE_PARM_DESC( -+ exclusive_caps, -+ "whether to announce OUTPUT/CAPTURE capabilities exclusively or not [DEFAULT: " __stringify( -+ V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS) "]"); ++ if (ldkn_version < 2) { ++ dev_warn(smc->ldev, ++ "iomem enable failed: ldkn version %u is less than minimum (2)\n", ++ ldkn_version); ++ goto out_iomem; ++ } + + return 0; + -+/* format specifications */ -+#define V4L2LOOPBACK_SIZE_MIN_WIDTH 2 -+#define V4L2LOOPBACK_SIZE_MIN_HEIGHT 1 -+#define V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH 8192 
-+#define V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT 8192 ++out_iomem: ++ iounmap(smc->iomem_base); + -+#define V4L2LOOPBACK_SIZE_DEFAULT_WIDTH 640 -+#define V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT 480 ++out: ++ return -ENXIO; ++} + -+static int max_width = V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH; -+module_param(max_width, int, S_IRUGO); -+MODULE_PARM_DESC(max_width, -+ "maximum allowed frame width [DEFAULT: " __stringify( -+ V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH) "]"); -+static int max_height = V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT; -+module_param(max_height, int, S_IRUGO); -+MODULE_PARM_DESC(max_height, -+ "maximum allowed frame height [DEFAULT: " __stringify( -+ V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT) "]"); ++static void applesmc_free_resources(struct applesmc_device *smc) ++{ ++ if (smc->iomem_base_set) ++ iounmap(smc->iomem_base); ++ release_region(smc->port_base, APPLESMC_NR_PORTS); + } + + /* Synchronize device with memorized backlight state */ + static int applesmc_pm_resume(struct device *dev) + { +- if (smcreg.has_key_backlight) +- applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); ++ struct applesmc_device *smc = dev_get_drvdata(dev); + -+static DEFINE_IDR(v4l2loopback_index_idr); -+static DEFINE_MUTEX(v4l2loopback_ctl_mutex); ++ if (smc->reg.has_key_backlight) ++ applesmc_write_key(smc, BACKLIGHT_KEY, smc->backlight_state, 2); + -+/* frame intervals */ -+#define V4L2LOOPBACK_FPS_MIN 0 -+#define V4L2LOOPBACK_FPS_MAX 1000 + return 0; + } + + /* Reinitialize device on resume from hibernation */ + static int applesmc_pm_restore(struct device *dev) + { +- applesmc_device_init(); ++ struct applesmc_device *smc = dev_get_drvdata(dev); + -+/* control IDs */ -+#define V4L2LOOPBACK_CID_BASE (V4L2_CID_USER_BASE | 0xf000) -+#define CID_KEEP_FORMAT (V4L2LOOPBACK_CID_BASE + 0) -+#define CID_SUSTAIN_FRAMERATE (V4L2LOOPBACK_CID_BASE + 1) -+#define CID_TIMEOUT (V4L2LOOPBACK_CID_BASE + 2) -+#define CID_TIMEOUT_IMAGE_IO (V4L2LOOPBACK_CID_BASE + 3) ++ applesmc_device_init(smc); + -+static int v4l2loopback_s_ctrl(struct v4l2_ctrl *ctrl); -+static const struct v4l2_ctrl_ops v4l2loopback_ctrl_ops = { -+ .s_ctrl = v4l2loopback_s_ctrl, -+}; -+static const struct v4l2_ctrl_config v4l2loopback_ctrl_keepformat = { -+ // clang-format off -+ .ops = &v4l2loopback_ctrl_ops, -+ .id = CID_KEEP_FORMAT, -+ .name = "keep_format", -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .min = 0, -+ .max = 1, -+ .step = 1, -+ .def = 0, -+ // clang-format on -+}; -+static const struct v4l2_ctrl_config v4l2loopback_ctrl_sustainframerate = { -+ // clang-format off -+ .ops = &v4l2loopback_ctrl_ops, -+ .id = CID_SUSTAIN_FRAMERATE, -+ .name = "sustain_framerate", -+ .type = V4L2_CTRL_TYPE_BOOLEAN, -+ .min = 0, -+ .max = 1, -+ .step = 1, -+ .def = 0, -+ // clang-format on -+}; -+static const struct v4l2_ctrl_config v4l2loopback_ctrl_timeout = { -+ // clang-format off -+ .ops = &v4l2loopback_ctrl_ops, -+ .id = CID_TIMEOUT, -+ .name = "timeout", -+ .type = V4L2_CTRL_TYPE_INTEGER, -+ .min = 0, -+ .max = MAX_TIMEOUT, -+ .step = 1, -+ .def = 0, -+ // clang-format on + return applesmc_pm_resume(dev); + } + ++static const struct acpi_device_id applesmc_ids[] = { ++ {"APP0001", 0}, ++ {"", 0}, +}; -+static const struct v4l2_ctrl_config v4l2loopback_ctrl_timeoutimageio = { -+ // clang-format off -+ .ops = &v4l2loopback_ctrl_ops, -+ .id = CID_TIMEOUT_IMAGE_IO, -+ .name = "timeout_image_io", -+ .type = V4L2_CTRL_TYPE_BUTTON, -+ .min = 0, -+ .max = 1, -+ .step = 1, -+ .def = 0, -+ // clang-format on -+}; -+ -+/* module structures */ -+struct v4l2loopback_private { -+ 
int device_nr; -+}; -+ -+/* TODO(vasaka) use typenames which are common to kernel, but first find out if -+ * it is needed */ -+/* struct keeping state and settings of loopback device */ + -+struct v4l2l_buffer { -+ struct v4l2_buffer buffer; -+ struct list_head list_head; -+ int use_count; -+}; + static const struct dev_pm_ops applesmc_pm_ops = { + .resume = applesmc_pm_resume, + .restore = applesmc_pm_restore, + }; + +-static struct platform_driver applesmc_driver = { +- .probe = applesmc_probe, +- .driver = { +- .name = "applesmc", +- .pm = &applesmc_pm_ops, ++static struct acpi_driver applesmc_driver = { ++ .name = "applesmc", ++ .class = "applesmc", ++ .ids = applesmc_ids, ++ .ops = { ++ .add = applesmc_add, ++ .remove = applesmc_remove ++ }, ++ .drv = { ++ .pm = &applesmc_pm_ops + }, + }; + +@@ -714,25 +1149,26 @@ static struct platform_driver applesmc_driver = { + * applesmc_calibrate - Set our "resting" values. Callers must + * hold applesmc_lock. + */ +-static void applesmc_calibrate(void) ++static void applesmc_calibrate(struct applesmc_device *smc) + { +- applesmc_read_s16(MOTION_SENSOR_X_KEY, &rest_x); +- applesmc_read_s16(MOTION_SENSOR_Y_KEY, &rest_y); +- rest_x = -rest_x; ++ applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &smc->rest_x); ++ applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &smc->rest_y); ++ smc->rest_x = -smc->rest_x; + } + + static void applesmc_idev_poll(struct input_dev *idev) + { ++ struct applesmc_device *smc = dev_get_drvdata(&idev->dev); + s16 x, y; + +- if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x)) ++ if (applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &x)) + return; +- if (applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y)) ++ if (applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &y)) + return; + + x = -x; +- input_report_abs(idev, ABS_X, x - rest_x); +- input_report_abs(idev, ABS_Y, y - rest_y); ++ input_report_abs(idev, ABS_X, x - smc->rest_x); ++ input_report_abs(idev, ABS_Y, y - smc->rest_y); + input_sync(idev); + } + +@@ -747,16 +1183,17 @@ static ssize_t applesmc_name_show(struct device *dev, + static ssize_t applesmc_position_show(struct device *dev, + struct device_attribute *attr, char *buf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + s16 x, y, z; + +- ret = applesmc_read_s16(MOTION_SENSOR_X_KEY, &x); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_X_KEY, &x); + if (ret) + goto out; +- ret = applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_Y_KEY, &y); + if (ret) + goto out; +- ret = applesmc_read_s16(MOTION_SENSOR_Z_KEY, &z); ++ ret = applesmc_read_s16(smc, MOTION_SENSOR_Z_KEY, &z); + if (ret) + goto out; + +@@ -770,6 +1207,7 @@ static ssize_t applesmc_position_show(struct device *dev, + static ssize_t applesmc_light_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + static int data_length; + int ret; +@@ -777,7 +1215,7 @@ static ssize_t applesmc_light_show(struct device *dev, + u8 buffer[10]; + + if (!data_length) { +- entry = applesmc_get_entry_by_key(LIGHT_SENSOR_LEFT_KEY); ++ entry = applesmc_get_entry_by_key(smc, LIGHT_SENSOR_LEFT_KEY); + if (IS_ERR(entry)) + return PTR_ERR(entry); + if (entry->len > 10) +@@ -786,7 +1224,7 @@ static ssize_t applesmc_light_show(struct device *dev, + pr_info("light sensor data length set to %d\n", data_length); + } + +- ret = applesmc_read_key(LIGHT_SENSOR_LEFT_KEY, buffer, data_length); ++ ret = applesmc_read_key(smc, LIGHT_SENSOR_LEFT_KEY, 
buffer, data_length); + if (ret) + goto out; + /* newer macbooks report a single 10-bit bigendian value */ +@@ -796,7 +1234,7 @@ static ssize_t applesmc_light_show(struct device *dev, + } + left = buffer[2]; + +- ret = applesmc_read_key(LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); ++ ret = applesmc_read_key(smc, LIGHT_SENSOR_RIGHT_KEY, buffer, data_length); + if (ret) + goto out; + right = buffer[2]; +@@ -812,7 +1250,8 @@ static ssize_t applesmc_light_show(struct device *dev, + static ssize_t applesmc_show_sensor_label(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) + { +- const char *key = smcreg.index[to_index(devattr)]; ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const char *key = smc->reg.index[to_index(devattr)]; + + return sysfs_emit(sysfsbuf, "%s\n", key); + } +@@ -821,12 +1260,13 @@ static ssize_t applesmc_show_sensor_label(struct device *dev, + static ssize_t applesmc_show_temperature(struct device *dev, + struct device_attribute *devattr, char *sysfsbuf) + { +- const char *key = smcreg.index[to_index(devattr)]; ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const char *key = smc->reg.index[to_index(devattr)]; + int ret; + s16 value; + int temp; + +- ret = applesmc_read_s16(key, &value); ++ ret = applesmc_read_s16(smc, key, &value); + if (ret) + return ret; + +@@ -838,6 +1278,8 @@ static ssize_t applesmc_show_temperature(struct device *dev, + static ssize_t applesmc_show_fan_speed(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const struct applesmc_entry *entry; + int ret; + unsigned int speed = 0; + char newkey[5]; +@@ -846,11 +1288,21 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); + +- ret = applesmc_read_key(newkey, buffer, 2); ++ entry = applesmc_get_entry_by_key(smc, newkey); ++ if (IS_ERR(entry)) ++ return PTR_ERR(entry); ++ ++ if (!strcmp(entry->type, FLOAT_TYPE)) { ++ ret = applesmc_read_entry(smc, entry, (u8 *) &speed, 4); ++ speed = applesmc_float_to_u32(speed); ++ } else { ++ ret = applesmc_read_entry(smc, entry, buffer, 2); ++ speed = ((buffer[0] << 8 | buffer[1]) >> 2); ++ } + -+struct v4l2_loopback_device { -+ struct v4l2_device v4l2_dev; -+ struct v4l2_ctrl_handler ctrl_handler; -+ struct video_device *vdev; -+ /* pixel and stream format */ -+ struct v4l2_pix_format pix_format; -+ bool pix_format_has_valid_sizeimage; -+ struct v4l2_captureparm capture_param; -+ unsigned long frame_jiffies; + if (ret) + return ret; + +- speed = ((buffer[0] << 8 | buffer[1]) >> 2); + return sysfs_emit(sysfsbuf, "%u\n", speed); + } + +@@ -858,6 +1310,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + struct device_attribute *attr, + const char *sysfsbuf, size_t count) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ const struct applesmc_entry *entry; + int ret; + unsigned long speed; + char newkey[5]; +@@ -869,9 +1323,18 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); + +- buffer[0] = (speed >> 6) & 0xff; +- buffer[1] = (speed << 2) & 0xff; +- ret = applesmc_write_key(newkey, buffer, 2); ++ entry = applesmc_get_entry_by_key(smc, newkey); ++ if (IS_ERR(entry)) ++ return PTR_ERR(entry); ++ ++ if (!strcmp(entry->type, FLOAT_TYPE)) { ++ speed = applesmc_u32_to_float(speed); ++ ret = applesmc_write_entry(smc, entry, (u8 *) 
&speed, 4);
++ } else {
++ buffer[0] = (speed >> 6) & 0xff;
++ buffer[1] = (speed << 2) & 0xff;
++ ret = applesmc_write_key(smc, newkey, buffer, 2);
++ }
+
+ if (ret)
+ return ret;
+@@ -882,15 +1345,30 @@ static ssize_t applesmc_show_fan_manual(struct device *dev,
+ struct device_attribute *attr, char *sysfsbuf)
+ {
++ struct applesmc_device *smc = dev_get_drvdata(dev);
+ int ret;
+ u16 manual = 0;
+ u8 buffer[2];
++ char newkey[5];
++ bool has_newkey = false;
+
-+ /* ctrls */
-+ int keep_format; /* CID_KEEP_FORMAT; stay ready_for_capture even when all
-+ openers close() the device */
-+ int sustain_framerate; /* CID_SUSTAIN_FRAMERATE; duplicate frames to maintain
-+ (close to) nominal framerate */
++ scnprintf(newkey, sizeof(newkey), FAN_MANUAL_FMT, to_index(attr));
+
-+ /* buffers stuff */
-+ u8 *image; /* pointer to actual buffers data */
-+ unsigned long int imagesize; /* size of buffers data */
-+ int buffers_number; /* should not be big, 4 is a good choice */
-+ struct v4l2l_buffer buffers[MAX_BUFFERS]; /* inner driver buffers */
-+ int used_buffers; /* number of the actually used buffers */
-+ int max_openers; /* how many times can this device be opened */
++ ret = applesmc_has_key(smc, newkey, &has_newkey);
++ if (ret)
++ return ret;
+
-+ s64 write_position; /* number of last written frame + 1 */
-+ struct list_head outbufs_list; /* buffers in output DQBUF order */
-+ int bufpos2index
-+ [MAX_BUFFERS]; /* mapping of (read/write_position % used_buffers)
-+ * to inner buffer index */
-+ long buffer_size;
++ if (has_newkey) {
++ ret = applesmc_read_key(smc, newkey, buffer, 1);
++ manual = buffer[0];
++ } else {
++ ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2);
++ manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
++ }
+
+- ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
+ if (ret)
+ return ret;
+
+- manual = ((buffer[0] << 8 | buffer[1]) >> to_index(attr)) & 0x01;
+ return sysfs_emit(sysfsbuf, "%d\n", manual);
+ }
+
+@@ -898,29 +1376,42 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
+ struct device_attribute *attr,
+ const char *sysfsbuf, size_t count)
+ {
++ struct applesmc_device *smc = dev_get_drvdata(dev);
+ int ret;
+ u8 buffer[2];
++ char newkey[5];
++ bool has_newkey = false;
+ unsigned long input;
+ u16 val;
+
+ if (kstrtoul(sysfsbuf, 10, &input) < 0)
+ return -EINVAL;
+
+- ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
++ scnprintf(newkey, sizeof(newkey), FAN_MANUAL_FMT, to_index(attr));
+
-+ /* sustain_framerate stuff */
-+ struct timer_list sustain_timer;
-+ unsigned int reread_count;
++ ret = applesmc_has_key(smc, newkey, &has_newkey);
+ if (ret)
+- goto out;
++ return ret;
+
+- val = (buffer[0] << 8 | buffer[1]);
++ if (has_newkey) {
++ buffer[0] = input & 1;
++ ret = applesmc_write_key(smc, newkey, buffer, 1);
++ } else {
++ ret = applesmc_read_key(smc, FANS_MANUAL, buffer, 2);
++ if (ret)
++ goto out;
++ val = (buffer[0] << 8 | buffer[1]);
+
+- if (input)
+- val = val | (0x01 << to_index(attr));
+- else
+- val = val & ~(0x01 << to_index(attr));
++ if (input)
++ val = val | (0x01 << to_index(attr));
++ else
++ val = val & ~(0x01 << to_index(attr));
+
+- buffer[0] = (val >> 8) & 0xFF;
+- buffer[1] = val & 0xFF;
++ buffer[0] = (val >> 8) & 0xFF;
++ buffer[1] = val & 0xFF;
+
+- ret = applesmc_write_key(FANS_MANUAL, buffer, 2);
++ ret = applesmc_write_key(smc, FANS_MANUAL, buffer, 2);
++ }
+
+ out:
+ if (ret)
+@@ -932,13 +1423,14 @@ static ssize_t
applesmc_store_fan_manual(struct device *dev, + static ssize_t applesmc_show_fan_position(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + char newkey[5]; + u8 buffer[17]; + + scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr)); + +- ret = applesmc_read_key(newkey, buffer, 16); ++ ret = applesmc_read_key(smc, newkey, buffer, 16); + buffer[16] = 0; + + if (ret) +@@ -950,43 +1442,79 @@ static ssize_t applesmc_show_fan_position(struct device *dev, + static ssize_t applesmc_calibrate_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { +- return sysfs_emit(sysfsbuf, "(%d,%d)\n", rest_x, rest_y); ++ struct applesmc_device *smc = dev_get_drvdata(dev); + -+ /* timeout stuff */ -+ unsigned long timeout_jiffies; /* CID_TIMEOUT; 0 means disabled */ -+ int timeout_image_io; /* CID_TIMEOUT_IMAGE_IO; next opener will -+ * read/write to timeout_image */ -+ u8 *timeout_image; /* copy of it will be captured when timeout passes */ -+ struct v4l2l_buffer timeout_image_buffer; -+ struct timer_list timeout_timer; -+ int timeout_happened; ++ return sysfs_emit(sysfsbuf, "(%d,%d)\n", smc->rest_x, smc->rest_y); + } + + static ssize_t applesmc_calibrate_store(struct device *dev, + struct device_attribute *attr, const char *sysfsbuf, size_t count) + { +- applesmc_calibrate(); ++ struct applesmc_device *smc = dev_get_drvdata(dev); + -+ /* sync stuff */ -+ atomic_t open_count; ++ applesmc_calibrate(smc); + + return count; + } + + static void applesmc_backlight_set(struct work_struct *work) + { +- applesmc_write_key(BACKLIGHT_KEY, backlight_state, 2); ++ struct applesmc_device *smc = container_of(work, struct applesmc_device, backlight_work); + -+ int ready_for_capture; /* set to the number of writers that opened the -+ * device and negotiated format. */ -+ int ready_for_output; /* set to true when no writer is currently attached -+ * this differs slightly from !ready_for_capture, -+ * e.g. when using fallback images */ -+ int active_readers; /* increase if any reader starts streaming */ -+ int announce_all_caps; /* set to false, if device caps (OUTPUT/CAPTURE) -+ * should only be announced if the resp. 
"ready" -+ * flag is set; default=TRUE */ ++ applesmc_write_key(smc, BACKLIGHT_KEY, smc->backlight_state, 2); + } +-static DECLARE_WORK(backlight_work, &applesmc_backlight_set); + + static void applesmc_brightness_set(struct led_classdev *led_cdev, + enum led_brightness value) + { ++ struct applesmc_device *smc = dev_get_drvdata(led_cdev->dev); + int ret; + +- backlight_state[0] = value; +- ret = queue_work(applesmc_led_wq, &backlight_work); ++ smc->backlight_state[0] = value; ++ ret = queue_work(smc->backlight_wq, &smc->backlight_work); + + if (debug && (!ret)) + dev_dbg(led_cdev->dev, "work was already on the queue.\n"); + } + ++static ssize_t applesmc_BCLM_store(struct device *dev, ++ struct device_attribute *attr, char *sysfsbuf, size_t count) ++{ ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ u8 val; + -+ int min_width, max_width; -+ int min_height, max_height; ++ if (kstrtou8(sysfsbuf, 10, &val) < 0) ++ return -EINVAL; + -+ char card_label[32]; ++ if (val < 0 || val > 100) ++ return -EINVAL; + -+ wait_queue_head_t read_event; -+ spinlock_t lock, list_lock; -+}; ++ if (applesmc_write_key(smc, "BCLM", &val, 1)) ++ return -ENODEV; ++ return count; ++} + -+/* types of opener shows what opener wants to do with loopback */ -+enum opener_type { -+ // clang-format off -+ UNNEGOTIATED = 0, -+ READER = 1, -+ WRITER = 2, -+ // clang-format on -+}; ++static ssize_t applesmc_BCLM_show(struct device *dev, ++ struct device_attribute *attr, char *sysfsbuf) ++{ ++ struct applesmc_device *smc = dev_get_drvdata(dev); ++ u8 val; + -+/* struct keeping state and type of opener */ -+struct v4l2_loopback_opener { -+ enum opener_type type; -+ s64 read_position; /* number of last processed frame + 1 or -+ * write_position - 1 if reader went out of sync */ -+ unsigned int reread_count; -+ struct v4l2_buffer *buffers; -+ int buffers_number; /* should not be big, 4 is a good choice */ -+ int timeout_image_io; ++ if (applesmc_read_key(smc, "BCLM", &val, 1)) ++ return -ENODEV; + -+ struct v4l2_fh fh; -+}; ++ return sysfs_emit(sysfsbuf, "%d\n", val); ++} + -+#define fh_to_opener(ptr) container_of((ptr), struct v4l2_loopback_opener, fh) + static ssize_t applesmc_key_count_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + int ret; + u8 buffer[4]; + u32 count; + +- ret = applesmc_read_key(KEY_COUNT_KEY, buffer, 4); ++ ret = applesmc_read_key(smc, KEY_COUNT_KEY, buffer, 4); + if (ret) + return ret; + +@@ -998,13 +1526,14 @@ static ssize_t applesmc_key_count_show(struct device *dev, + static ssize_t applesmc_key_at_index_read_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + int ret; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); +- ret = applesmc_read_entry(entry, sysfsbuf, entry->len); ++ ret = applesmc_read_entry(smc, entry, sysfsbuf, entry->len); + if (ret) + return ret; + +@@ -1014,9 +1543,10 @@ static ssize_t applesmc_key_at_index_read_show(struct device *dev, + static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, 
smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1026,9 +1556,10 @@ static ssize_t applesmc_key_at_index_data_length_show(struct device *dev, + static ssize_t applesmc_key_at_index_type_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1038,9 +1569,10 @@ static ssize_t applesmc_key_at_index_type_show(struct device *dev, + static ssize_t applesmc_key_at_index_name_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + const struct applesmc_entry *entry; + +- entry = applesmc_get_entry_by_index(key_at_index); ++ entry = applesmc_get_entry_by_index(smc, smc->key_at_index); + if (IS_ERR(entry)) + return PTR_ERR(entry); + +@@ -1050,28 +1582,25 @@ static ssize_t applesmc_key_at_index_name_show(struct device *dev, + static ssize_t applesmc_key_at_index_show(struct device *dev, + struct device_attribute *attr, char *sysfsbuf) + { +- return sysfs_emit(sysfsbuf, "%d\n", key_at_index); ++ struct applesmc_device *smc = dev_get_drvdata(dev); + -+/* this is heavily inspired by the bttv driver found in the linux kernel */ -+struct v4l2l_format { -+ char *name; -+ int fourcc; /* video4linux 2 */ -+ int depth; /* bit/pixel */ -+ int flags; ++ return sysfs_emit(sysfsbuf, "%d\n", smc->key_at_index); + } + + static ssize_t applesmc_key_at_index_store(struct device *dev, + struct device_attribute *attr, const char *sysfsbuf, size_t count) + { ++ struct applesmc_device *smc = dev_get_drvdata(dev); + unsigned long newkey; + + if (kstrtoul(sysfsbuf, 10, &newkey) < 0 +- || newkey >= smcreg.key_count) ++ || newkey >= smc->reg.key_count) + return -EINVAL; + +- key_at_index = newkey; ++ smc->key_at_index = newkey; + return count; + } + +-static struct led_classdev applesmc_backlight = { +- .name = "smc::kbd_backlight", +- .default_trigger = "nand-disk", +- .brightness_set = applesmc_brightness_set, +-}; +- + static struct applesmc_node_group info_group[] = { + { "name", applesmc_name_show }, + { "key_count", applesmc_key_count_show }, +@@ -1111,19 +1640,25 @@ static struct applesmc_node_group temp_group[] = { + { } + }; + ++static struct applesmc_node_group BCLM_group[] = { ++ { "battery_charge_limit", applesmc_BCLM_show, applesmc_BCLM_store }, ++ { } +}; -+/* set the v4l2l_format.flags to PLANAR for non-packed formats */ -+#define FORMAT_FLAGS_PLANAR 0x01 -+#define FORMAT_FLAGS_COMPRESSED 0x02 + -+#include "v4l2loopback_formats.h" + /* Module stuff */ + + /* + * applesmc_destroy_nodes - remove files and free associated memory + */ +-static void applesmc_destroy_nodes(struct applesmc_node_group *groups) ++static void applesmc_destroy_nodes(struct applesmc_device *smc, ++ struct applesmc_node_group *groups) + { + struct applesmc_node_group *grp; + struct applesmc_dev_attr *node; + + for (grp = groups; grp->nodes; grp++) { + for (node = grp->nodes; node->sda.dev_attr.attr.name; node++) +- sysfs_remove_file(&pdev->dev.kobj, ++ sysfs_remove_file(&smc->dev->dev.kobj, + &node->sda.dev_attr.attr); + kfree(grp->nodes); + grp->nodes = NULL; +@@ -1133,7 +1668,8 @@ static void applesmc_destroy_nodes(struct applesmc_node_group *groups) + /* + * applesmc_create_nodes - create a two-dimensional group of sysfs files + */ +-static int 
applesmc_create_nodes(struct applesmc_node_group *groups, int num) ++static int applesmc_create_nodes(struct applesmc_device *smc, ++ struct applesmc_node_group *groups, int num) + { + struct applesmc_node_group *grp; + struct applesmc_dev_attr *node; +@@ -1157,7 +1693,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) + sysfs_attr_init(attr); + attr->name = node->name; + attr->mode = 0444 | (grp->store ? 0200 : 0); +- ret = sysfs_create_file(&pdev->dev.kobj, attr); ++ ret = sysfs_create_file(&smc->dev->dev.kobj, attr); + if (ret) { + attr->name = NULL; + goto out; +@@ -1167,57 +1703,56 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) + + return 0; + out: +- applesmc_destroy_nodes(groups); ++ applesmc_destroy_nodes(smc, groups); + return ret; + } + + /* Create accelerometer resources */ +-static int applesmc_create_accelerometer(void) ++static int applesmc_create_accelerometer(struct applesmc_device *smc) + { + int ret; +- +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return 0; + +- ret = applesmc_create_nodes(accelerometer_group, 1); ++ ret = applesmc_create_nodes(smc, accelerometer_group, 1); + if (ret) + goto out; + +- applesmc_idev = input_allocate_device(); +- if (!applesmc_idev) { ++ smc->idev = input_allocate_device(); ++ if (!smc->idev) { + ret = -ENOMEM; + goto out_sysfs; + } + + /* initial calibrate for the input device */ +- applesmc_calibrate(); ++ applesmc_calibrate(smc); + + /* initialize the input device */ +- applesmc_idev->name = "applesmc"; +- applesmc_idev->id.bustype = BUS_HOST; +- applesmc_idev->dev.parent = &pdev->dev; +- input_set_abs_params(applesmc_idev, ABS_X, ++ smc->idev->name = "applesmc"; ++ smc->idev->id.bustype = BUS_HOST; ++ smc->idev->dev.parent = &smc->dev->dev; ++ input_set_abs_params(smc->idev, ABS_X, + -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); +- input_set_abs_params(applesmc_idev, ABS_Y, ++ input_set_abs_params(smc->idev, ABS_Y, + -256, 256, APPLESMC_INPUT_FUZZ, APPLESMC_INPUT_FLAT); + +- ret = input_setup_polling(applesmc_idev, applesmc_idev_poll); ++ ret = input_setup_polling(smc->idev, applesmc_idev_poll); + if (ret) + goto out_idev; + +- input_set_poll_interval(applesmc_idev, APPLESMC_POLL_INTERVAL); ++ input_set_poll_interval(smc->idev, APPLESMC_POLL_INTERVAL); + +- ret = input_register_device(applesmc_idev); ++ ret = input_register_device(smc->idev); + if (ret) + goto out_idev; + + return 0; + + out_idev: +- input_free_device(applesmc_idev); ++ input_free_device(smc->idev); + + out_sysfs: +- applesmc_destroy_nodes(accelerometer_group); ++ applesmc_destroy_nodes(smc, accelerometer_group); + + out: + pr_warn("driver init failed (ret=%d)!\n", ret); +@@ -1225,44 +1760,55 @@ static int applesmc_create_accelerometer(void) + } + + /* Release all resources used by the accelerometer */ +-static void applesmc_release_accelerometer(void) ++static void applesmc_release_accelerometer(struct applesmc_device *smc) + { +- if (!smcreg.has_accelerometer) ++ if (!smc->reg.has_accelerometer) + return; +- input_unregister_device(applesmc_idev); +- applesmc_destroy_nodes(accelerometer_group); ++ input_unregister_device(smc->idev); ++ applesmc_destroy_nodes(smc, accelerometer_group); + } + +-static int applesmc_create_light_sensor(void) ++static int applesmc_create_light_sensor(struct applesmc_device *smc) + { +- if (!smcreg.num_light_sensors) ++ if (!smc->reg.num_light_sensors) + return 0; +- return applesmc_create_nodes(light_sensor_group, 1); ++ return 
applesmc_create_nodes(smc, light_sensor_group, 1); + } + +-static void applesmc_release_light_sensor(void) ++static void applesmc_release_light_sensor(struct applesmc_device *smc) + { +- if (!smcreg.num_light_sensors) ++ if (!smc->reg.num_light_sensors) + return; +- applesmc_destroy_nodes(light_sensor_group); ++ applesmc_destroy_nodes(smc, light_sensor_group); + } + +-static int applesmc_create_key_backlight(void) ++static int applesmc_create_key_backlight(struct applesmc_device *smc) + { +- if (!smcreg.has_key_backlight) ++ int ret; + -+#ifndef V4L2_TYPE_IS_CAPTURE -+#define V4L2_TYPE_IS_CAPTURE(type) \ -+ ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE || \ -+ (type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) -+#endif /* V4L2_TYPE_IS_CAPTURE */ -+#ifndef V4L2_TYPE_IS_OUTPUT -+#define V4L2_TYPE_IS_OUTPUT(type) \ -+ ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT || \ -+ (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) -+#endif /* V4L2_TYPE_IS_OUTPUT */ ++ if (!smc->reg.has_key_backlight) + return 0; +- applesmc_led_wq = create_singlethread_workqueue("applesmc-led"); +- if (!applesmc_led_wq) ++ smc->backlight_wq = create_singlethread_workqueue("applesmc-led"); ++ if (!smc->backlight_wq) + return -ENOMEM; +- return led_classdev_register(&pdev->dev, &applesmc_backlight); + -+/* whether the format can be changed */ -+/* the format is fixated if we -+ - have writers (ready_for_capture>0) -+ - and/or have readers (active_readers>0) -+*/ -+#define V4L2LOOPBACK_IS_FIXED_FMT(device) \ -+ (device->ready_for_capture > 0 || device->active_readers > 0 || \ -+ device->keep_format) ++ INIT_WORK(&smc->backlight_work, applesmc_backlight_set); ++ smc->backlight_dev.name = "smc::kbd_backlight"; ++ smc->backlight_dev.default_trigger = "nand-disk"; ++ smc->backlight_dev.brightness_set = applesmc_brightness_set; ++ ret = led_classdev_register(&smc->dev->dev, &smc->backlight_dev); ++ if (ret) ++ destroy_workqueue(smc->backlight_wq); + -+static const unsigned int FORMATS = ARRAY_SIZE(formats); ++ return ret; + } + +-static void applesmc_release_key_backlight(void) ++static void applesmc_release_key_backlight(struct applesmc_device *smc) + { +- if (!smcreg.has_key_backlight) ++ if (!smc->reg.has_key_backlight) + return; +- led_classdev_unregister(&applesmc_backlight); +- destroy_workqueue(applesmc_led_wq); ++ led_classdev_unregister(&smc->backlight_dev); ++ destroy_workqueue(smc->backlight_wq); + } + + static int applesmc_dmi_match(const struct dmi_system_id *id) +@@ -1291,6 +1837,10 @@ static const struct dmi_system_id applesmc_whitelist[] __initconst = { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "Macmini") }, + }, ++ { applesmc_dmi_match, "Apple iMacPro", { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "iMacPro") }, ++ }, + { applesmc_dmi_match, "Apple MacPro", { + DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") }, +@@ -1306,90 +1856,91 @@ static const struct dmi_system_id applesmc_whitelist[] __initconst = { + { .ident = NULL } + }; + +-static int __init applesmc_init(void) ++static int applesmc_create_modules(struct applesmc_device *smc) + { + int ret; + +- if (!dmi_check_system(applesmc_whitelist)) { +- pr_warn("supported laptop not found!\n"); +- ret = -ENODEV; +- goto out; +- } +- +- if (!request_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS, +- "applesmc")) { +- ret = -ENXIO; +- goto out; +- } +- +- ret = platform_driver_register(&applesmc_driver); +- if (ret) +- goto out_region; +- +- pdev = platform_device_register_simple("applesmc", 
APPLESMC_DATA_PORT, +- NULL, 0); +- if (IS_ERR(pdev)) { +- ret = PTR_ERR(pdev); +- goto out_driver; +- } +- +- /* create register cache */ +- ret = applesmc_init_smcreg(); ++ ret = applesmc_create_nodes(smc, info_group, 1); + if (ret) +- goto out_device; +- +- ret = applesmc_create_nodes(info_group, 1); ++ goto out; ++ ret = applesmc_create_nodes(smc, BCLM_group, 1); + if (ret) +- goto out_smcreg; ++ goto out_info; + +- ret = applesmc_create_nodes(fan_group, smcreg.fan_count); ++ ret = applesmc_create_nodes(smc, fan_group, smc->reg.fan_count); + if (ret) +- goto out_info; ++ goto out_bclm; + +- ret = applesmc_create_nodes(temp_group, smcreg.index_count); ++ ret = applesmc_create_nodes(smc, temp_group, smc->reg.index_count); + if (ret) + goto out_fans; + +- ret = applesmc_create_accelerometer(); ++ ret = applesmc_create_accelerometer(smc); + if (ret) + goto out_temperature; + +- ret = applesmc_create_light_sensor(); ++ ret = applesmc_create_light_sensor(smc); + if (ret) + goto out_accelerometer; + +- ret = applesmc_create_key_backlight(); ++ ret = applesmc_create_key_backlight(smc); + if (ret) + goto out_light_sysfs; + +- hwmon_dev = hwmon_device_register(&pdev->dev); +- if (IS_ERR(hwmon_dev)) { +- ret = PTR_ERR(hwmon_dev); ++ smc->hwmon_dev = hwmon_device_register(&smc->dev->dev); ++ if (IS_ERR(smc->hwmon_dev)) { ++ ret = PTR_ERR(smc->hwmon_dev); + goto out_light_ledclass; + } + + return 0; + + out_light_ledclass: +- applesmc_release_key_backlight(); ++ applesmc_release_key_backlight(smc); + out_light_sysfs: +- applesmc_release_light_sensor(); ++ applesmc_release_light_sensor(smc); + out_accelerometer: +- applesmc_release_accelerometer(); ++ applesmc_release_accelerometer(smc); + out_temperature: +- applesmc_destroy_nodes(temp_group); ++ applesmc_destroy_nodes(smc, temp_group); + out_fans: +- applesmc_destroy_nodes(fan_group); ++ applesmc_destroy_nodes(smc, fan_group); ++out_bclm: ++ applesmc_destroy_nodes(smc, BCLM_group); + out_info: +- applesmc_destroy_nodes(info_group); +-out_smcreg: +- applesmc_destroy_smcreg(); +-out_device: +- platform_device_unregister(pdev); +-out_driver: +- platform_driver_unregister(&applesmc_driver); +-out_region: +- release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); ++ applesmc_destroy_nodes(smc, info_group); ++out: ++ return ret; ++} + -+static char *fourcc2str(unsigned int fourcc, char buf[4]) ++static void applesmc_destroy_modules(struct applesmc_device *smc) +{ -+ buf[0] = (fourcc >> 0) & 0xFF; -+ buf[1] = (fourcc >> 8) & 0xFF; -+ buf[2] = (fourcc >> 16) & 0xFF; -+ buf[3] = (fourcc >> 24) & 0xFF; -+ -+ return buf; ++ hwmon_device_unregister(smc->hwmon_dev); ++ applesmc_release_key_backlight(smc); ++ applesmc_release_light_sensor(smc); ++ applesmc_release_accelerometer(smc); ++ applesmc_destroy_nodes(smc, temp_group); ++ applesmc_destroy_nodes(smc, fan_group); ++ applesmc_destroy_nodes(smc, BCLM_group); ++ applesmc_destroy_nodes(smc, info_group); +} + -+static const struct v4l2l_format *format_by_fourcc(int fourcc) ++static int __init applesmc_init(void) +{ -+ unsigned int i; ++ int ret; + -+ for (i = 0; i < FORMATS; i++) { -+ if (formats[i].fourcc == fourcc) -+ return formats + i; ++ if (!dmi_check_system(applesmc_whitelist)) { ++ pr_warn("supported laptop not found!\n"); ++ ret = -ENODEV; ++ goto out; + } + -+ dprintk("unsupported format '%c%c%c%c'\n", (fourcc >> 0) & 0xFF, -+ (fourcc >> 8) & 0xFF, (fourcc >> 16) & 0xFF, -+ (fourcc >> 24) & 0xFF); -+ return NULL; -+} ++ ret = acpi_bus_register_driver(&applesmc_driver); ++ if (ret) ++ goto out; + 
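++ /* on success the ACPI core takes over and calls applesmc_add() for each matching APP0001 device */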
-+static void pix_format_set_size(struct v4l2_pix_format *f, -+ const struct v4l2l_format *fmt, -+ unsigned int width, unsigned int height) -+{ -+ f->width = width; -+ f->height = height; ++ return 0; + -+ if (fmt->flags & FORMAT_FLAGS_PLANAR) { -+ f->bytesperline = width; /* Y plane */ -+ f->sizeimage = (width * height * fmt->depth) >> 3; -+ } else if (fmt->flags & FORMAT_FLAGS_COMPRESSED) { -+ /* doesn't make sense for compressed formats */ -+ f->bytesperline = 0; -+ f->sizeimage = (width * height * fmt->depth) >> 3; -+ } else { -+ f->bytesperline = (width * fmt->depth) >> 3; -+ f->sizeimage = height * f->bytesperline; -+ } -+} + out: + pr_warn("driver init failed (ret=%d)!\n", ret); + return ret; +@@ -1397,23 +1948,14 @@ static int __init applesmc_init(void) + + static void __exit applesmc_exit(void) + { +- hwmon_device_unregister(hwmon_dev); +- applesmc_release_key_backlight(); +- applesmc_release_light_sensor(); +- applesmc_release_accelerometer(); +- applesmc_destroy_nodes(temp_group); +- applesmc_destroy_nodes(fan_group); +- applesmc_destroy_nodes(info_group); +- applesmc_destroy_smcreg(); +- platform_device_unregister(pdev); +- platform_driver_unregister(&applesmc_driver); +- release_region(APPLESMC_DATA_PORT, APPLESMC_NR_PORTS); ++ acpi_bus_unregister_driver(&applesmc_driver); + } + + module_init(applesmc_init); + module_exit(applesmc_exit); + + MODULE_AUTHOR("Nicolas Boichat"); ++MODULE_AUTHOR("Paul Pawlowski"); + MODULE_DESCRIPTION("Apple SMC"); + MODULE_LICENSE("GPL v2"); + MODULE_DEVICE_TABLE(dmi, applesmc_whitelist); +diff --git a/drivers/hwmon/steamdeck-hwmon.c b/drivers/hwmon/steamdeck-hwmon.c +new file mode 100644 +index 000000000000..9d0a5471b181 +--- /dev/null ++++ b/drivers/hwmon/steamdeck-hwmon.c +@@ -0,0 +1,294 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * Steam Deck EC sensors driver ++ * ++ * Copyright (C) 2021-2022 Valve Corporation ++ */ + -+static int v4l2l_fill_format(struct v4l2_format *fmt, int capture, -+ const u32 minwidth, const u32 maxwidth, -+ const u32 minheight, const u32 maxheight) ++#include ++#include ++#include ++ ++#define STEAMDECK_HWMON_NAME "steamdeck-hwmon" ++ ++struct steamdeck_hwmon { ++ struct acpi_device *adev; ++}; ++ ++static long ++steamdeck_hwmon_get(struct steamdeck_hwmon *sd, const char *method) +{ -+ u32 width = fmt->fmt.pix.width, height = fmt->fmt.pix.height; -+ u32 pixelformat = fmt->fmt.pix.pixelformat; -+ struct v4l2_format fmt0 = *fmt; -+ u32 bytesperline = 0, sizeimage = 0; -+ if (!width) -+ width = V4L2LOOPBACK_SIZE_DEFAULT_WIDTH; -+ if (!height) -+ height = V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT; -+ if (width < minwidth) -+ width = minwidth; -+ if (width > maxwidth) -+ width = maxwidth; -+ if (height < minheight) -+ height = minheight; -+ if (height > maxheight) -+ height = maxheight; ++ unsigned long long val; ++ if (ACPI_FAILURE(acpi_evaluate_integer(sd->adev->handle, ++ (char *)method, NULL, &val))) ++ return -EIO; + -+ /* sets: width,height,pixelformat,bytesperline,sizeimage */ -+ if (!(V4L2_TYPE_IS_MULTIPLANAR(fmt0.type))) { -+ fmt0.fmt.pix.bytesperline = 0; -+ fmt0.fmt.pix.sizeimage = 0; -+ } ++ return val; ++} + -+ if (0) { -+ ; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) -+ } else if (!v4l2_fill_pixfmt(&fmt0.fmt.pix, pixelformat, width, -+ height)) { -+ ; -+ } else if (!v4l2_fill_pixfmt_mp(&fmt0.fmt.pix_mp, pixelformat, width, -+ height)) { -+ ; -+#endif -+ } else { -+ const struct v4l2l_format *format = -+ format_by_fourcc(pixelformat); -+ if (!format) -+ return -EINVAL; -+ pix_format_set_size(&fmt0.fmt.pix, 
format, width, height); -+ fmt0.fmt.pix.pixelformat = format->fourcc; -+ } ++static int ++steamdeck_hwmon_read(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long *out) ++{ ++ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); + -+ if (V4L2_TYPE_IS_MULTIPLANAR(fmt0.type)) { -+ *fmt = fmt0; ++ switch (type) { ++ case hwmon_curr: ++ if (attr != hwmon_curr_input) ++ return -EOPNOTSUPP; + -+ if ((fmt->fmt.pix_mp.colorspace == V4L2_COLORSPACE_DEFAULT) || -+ (fmt->fmt.pix_mp.colorspace > V4L2_COLORSPACE_DCI_P3)) -+ fmt->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SRGB; -+ if (V4L2_FIELD_ANY == fmt->fmt.pix_mp.field) -+ fmt->fmt.pix_mp.field = V4L2_FIELD_NONE; -+ if (capture) -+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; -+ else -+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; -+ } else { -+ bytesperline = fmt->fmt.pix.bytesperline; -+ sizeimage = fmt->fmt.pix.sizeimage; -+ -+ *fmt = fmt0; -+ -+ if (!fmt->fmt.pix.bytesperline) -+ fmt->fmt.pix.bytesperline = bytesperline; -+ if (!fmt->fmt.pix.sizeimage) -+ fmt->fmt.pix.sizeimage = sizeimage; -+ -+ if ((fmt->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) || -+ (fmt->fmt.pix.colorspace > V4L2_COLORSPACE_DCI_P3)) -+ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; -+ if (V4L2_FIELD_ANY == fmt->fmt.pix.field) -+ fmt->fmt.pix.field = V4L2_FIELD_NONE; -+ if (capture) -+ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ else -+ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; -+ } -+ -+ return 0; -+} -+ -+/* Checks if v4l2l_fill_format() has set a valid, fixed sizeimage val. */ -+static bool v4l2l_pix_format_has_valid_sizeimage(struct v4l2_format *fmt) -+{ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) -+ const struct v4l2_format_info *info; -+ -+ info = v4l2_format_info(fmt->fmt.pix.pixelformat); -+ if (info && info->mem_planes == 1) -+ return true; -+#endif -+ -+ return false; -+} -+ -+static int pix_format_eq(const struct v4l2_pix_format *ref, -+ const struct v4l2_pix_format *tgt, int strict) -+{ -+ /* check if the two formats are equivalent. 
-+ * ANY fields are handled gracefully -+ */ -+#define _pix_format_eq0(x) \ -+ if (ref->x != tgt->x) \ -+ result = 0 -+#define _pix_format_eq1(x, def) \ -+ do { \ -+ if ((def != tgt->x) && (ref->x != tgt->x)) { \ -+ printk(KERN_INFO #x " failed"); \ -+ result = 0; \ -+ } \ -+ } while (0) -+ int result = 1; -+ _pix_format_eq0(width); -+ _pix_format_eq0(height); -+ _pix_format_eq0(pixelformat); -+ if (!strict) -+ return result; -+ _pix_format_eq1(field, V4L2_FIELD_ANY); -+ _pix_format_eq0(bytesperline); -+ _pix_format_eq0(sizeimage); -+ _pix_format_eq1(colorspace, V4L2_COLORSPACE_DEFAULT); -+ return result; -+} -+ -+static struct v4l2_loopback_device *v4l2loopback_getdevice(struct file *f); -+static int inner_try_setfmt(struct file *file, struct v4l2_format *fmt) -+{ -+ int capture = V4L2_TYPE_IS_CAPTURE(fmt->type); -+ struct v4l2_loopback_device *dev; -+ int needschange = 0; -+ char buf[5]; -+ buf[4] = 0; ++ *out = steamdeck_hwmon_get(sd, "PDAM"); ++ if (*out < 0) ++ return *out; ++ break; ++ case hwmon_in: ++ if (attr != hwmon_in_input) ++ return -EOPNOTSUPP; + -+ dev = v4l2loopback_getdevice(file); ++ *out = steamdeck_hwmon_get(sd, "PDVL"); ++ if (*out < 0) ++ return *out; ++ break; ++ case hwmon_temp: ++ if (attr != hwmon_temp_input) ++ return -EOPNOTSUPP; + -+ needschange = !(pix_format_eq(&dev->pix_format, &fmt->fmt.pix, 0)); -+ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { -+ fmt->fmt.pix = dev->pix_format; -+ if (needschange) { -+ if (dev->active_readers > 0 && capture) { -+ /* cannot call fmt_cap while there are readers */ -+ return -EBUSY; -+ } -+ if (dev->ready_for_capture > 0 && !capture) { -+ /* cannot call fmt_out while there are writers */ -+ return -EBUSY; -+ } ++ *out = steamdeck_hwmon_get(sd, "BATT"); ++ if (*out < 0) ++ return *out; ++ /* ++ * Assuming BATT returns deg C we need to mutiply it ++ * by 1000 to convert to mC ++ */ ++ *out *= 1000; ++ break; ++ case hwmon_fan: ++ switch (attr) { ++ case hwmon_fan_input: ++ *out = steamdeck_hwmon_get(sd, "FANR"); ++ if (*out < 0) ++ return *out; ++ break; ++ case hwmon_fan_target: ++ *out = steamdeck_hwmon_get(sd, "FSSR"); ++ if (*out < 0) ++ return *out; ++ break; ++ case hwmon_fan_fault: ++ *out = steamdeck_hwmon_get(sd, "FANC"); ++ if (*out < 0) ++ return *out; ++ /* ++ * FANC (Fan check): ++ * 0: Abnormal ++ * 1: Normal ++ */ ++ *out = !*out; ++ break; ++ default: ++ return -EOPNOTSUPP; + } -+ } -+ if (v4l2l_fill_format(fmt, capture, dev->min_width, dev->max_width, -+ dev->min_height, dev->max_height) != 0) { -+ return -EINVAL; ++ break; ++ default: ++ return -EOPNOTSUPP; + } + -+ if (1) { -+ char buf[5]; -+ buf[4] = 0; -+ dprintk("capFOURCC=%s\n", -+ fourcc2str(dev->pix_format.pixelformat, buf)); -+ } + return 0; +} + -+static int set_timeperframe(struct v4l2_loopback_device *dev, -+ struct v4l2_fract *tpf) ++static int ++steamdeck_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, const char **str) +{ -+ if ((tpf->denominator < 1) || (tpf->numerator < 1)) { -+ return -EINVAL; ++ switch (type) { ++ /* ++ * These two aren't, strictly speaking, measured. EC ++ * firmware just reports what PD negotiation resulted ++ * in. 
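++ * (PDVL reports the negotiated contract voltage, PDAM the contract current.)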
++ */ ++ case hwmon_curr: ++ *str = "PD Contract Current"; ++ break; ++ case hwmon_in: ++ *str = "PD Contract Voltage"; ++ break; ++ case hwmon_temp: ++ *str = "Battery Temp"; ++ break; ++ case hwmon_fan: ++ *str = "System Fan"; ++ break; ++ default: ++ return -EOPNOTSUPP; + } -+ dev->capture_param.timeperframe = *tpf; -+ dev->frame_jiffies = max(1UL, msecs_to_jiffies(1000) * tpf->numerator / -+ tpf->denominator); ++ + return 0; +} + -+static struct v4l2_loopback_device *v4l2loopback_cd2dev(struct device *cd); -+ -+/* device attributes */ -+/* available via sysfs: /sys/devices/virtual/video4linux/video* */ -+ -+static ssize_t attr_show_format(struct device *cd, -+ struct device_attribute *attr, char *buf) ++static int ++steamdeck_hwmon_write(struct device *dev, enum hwmon_sensor_types type, ++ u32 attr, int channel, long val) +{ -+ /* gets the current format as "FOURCC:WxH@f/s", e.g. "YUYV:320x240@1000/30" */ -+ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); -+ const struct v4l2_fract *tpf; -+ char buf4cc[5], buf_fps[32]; -+ -+ if (!dev || !V4L2LOOPBACK_IS_FIXED_FMT(dev)) -+ return 0; -+ tpf = &dev->capture_param.timeperframe; ++ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); + -+ fourcc2str(dev->pix_format.pixelformat, buf4cc); -+ buf4cc[4] = 0; -+ if (tpf->numerator == 1) -+ snprintf(buf_fps, sizeof(buf_fps), "%d", tpf->denominator); -+ else -+ snprintf(buf_fps, sizeof(buf_fps), "%d/%d", tpf->denominator, -+ tpf->numerator); -+ return sprintf(buf, "%4s:%dx%d@%s\n", buf4cc, dev->pix_format.width, -+ dev->pix_format.height, buf_fps); -+} ++ if (type != hwmon_fan || ++ attr != hwmon_fan_target) ++ return -EOPNOTSUPP; + -+static ssize_t attr_store_format(struct device *cd, -+ struct device_attribute *attr, const char *buf, -+ size_t len) -+{ -+ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); -+ int fps_num = 0, fps_den = 1; ++ val = clamp_val(val, 0, 7300); + -+ if (!dev) -+ return -ENODEV; ++ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, ++ "FANS", val))) ++ return -EIO; + -+ /* only fps changing is supported */ -+ if (sscanf(buf, "@%d/%d", &fps_num, &fps_den) > 0) { -+ struct v4l2_fract f = { .numerator = fps_den, -+ .denominator = fps_num }; -+ int err = 0; -+ if ((err = set_timeperframe(dev, &f)) < 0) -+ return err; -+ return len; -+ } -+ return -EINVAL; ++ return 0; +} + -+static DEVICE_ATTR(format, S_IRUGO | S_IWUSR, attr_show_format, -+ attr_store_format); -+ -+static ssize_t attr_show_buffers(struct device *cd, -+ struct device_attribute *attr, char *buf) ++static umode_t ++steamdeck_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, ++ u32 attr, int channel) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); -+ -+ if (!dev) -+ return -ENODEV; ++ if (type == hwmon_fan && ++ attr == hwmon_fan_target) ++ return 0644; + -+ return sprintf(buf, "%d\n", dev->used_buffers); ++ return 0444; +} + -+static DEVICE_ATTR(buffers, S_IRUGO, attr_show_buffers, NULL); ++static const struct hwmon_channel_info *steamdeck_hwmon_info[] = { ++ HWMON_CHANNEL_INFO(in, ++ HWMON_I_INPUT | HWMON_I_LABEL), ++ HWMON_CHANNEL_INFO(curr, ++ HWMON_C_INPUT | HWMON_C_LABEL), ++ HWMON_CHANNEL_INFO(temp, ++ HWMON_T_INPUT | HWMON_T_LABEL), ++ HWMON_CHANNEL_INFO(fan, ++ HWMON_F_INPUT | HWMON_F_LABEL | ++ HWMON_F_TARGET | HWMON_F_FAULT), ++ NULL ++}; + -+static ssize_t attr_show_maxopeners(struct device *cd, -+ struct device_attribute *attr, char *buf) -+{ -+ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); ++static const struct hwmon_ops 
steamdeck_hwmon_ops = { ++ .is_visible = steamdeck_hwmon_is_visible, ++ .read = steamdeck_hwmon_read, ++ .read_string = steamdeck_hwmon_read_string, ++ .write = steamdeck_hwmon_write, ++}; + -+ if (!dev) -+ return -ENODEV; ++static const struct hwmon_chip_info steamdeck_hwmon_chip_info = { ++ .ops = &steamdeck_hwmon_ops, ++ .info = steamdeck_hwmon_info, ++}; + -+ return sprintf(buf, "%d\n", dev->max_openers); -+} + -+static ssize_t attr_store_maxopeners(struct device *cd, -+ struct device_attribute *attr, -+ const char *buf, size_t len) ++static ssize_t ++steamdeck_hwmon_simple_store(struct device *dev, const char *buf, size_t count, ++ const char *method, ++ unsigned long upper_limit) +{ -+ struct v4l2_loopback_device *dev = NULL; -+ unsigned long curr = 0; ++ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); ++ unsigned long value; + -+ if (kstrtoul(buf, 0, &curr)) ++ if (kstrtoul(buf, 10, &value) || value >= upper_limit) + return -EINVAL; + -+ dev = v4l2loopback_cd2dev(cd); -+ if (!dev) -+ return -ENODEV; ++ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, ++ (char *)method, value))) ++ return -EIO; + -+ if (dev->max_openers == curr) -+ return len; ++ return count; ++} + -+ if (curr > __INT_MAX__ || dev->open_count.counter > curr) { -+ /* request to limit to less openers as are currently attached to us */ -+ return -EINVAL; -+ } ++static ssize_t ++steamdeck_hwmon_simple_show(struct device *dev, char *buf, ++ const char *method) ++{ ++ struct steamdeck_hwmon *sd = dev_get_drvdata(dev); ++ unsigned long value; + -+ dev->max_openers = (int)curr; ++ value = steamdeck_hwmon_get(sd, method); ++ if (value < 0) ++ return value; + -+ return len; ++ return sprintf(buf, "%ld\n", value); +} + -+static DEVICE_ATTR(max_openers, S_IRUGO | S_IWUSR, attr_show_maxopeners, -+ attr_store_maxopeners); -+ -+static ssize_t attr_show_state(struct device *cd, struct device_attribute *attr, -+ char *buf) -+{ -+ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); ++#define STEAMDECK_HWMON_ATTR_RW(_name, _set_method, _get_method, \ ++ _upper_limit) \ ++ static ssize_t _name##_show(struct device *dev, \ ++ struct device_attribute *attr, \ ++ char *buf) \ ++ { \ ++ return steamdeck_hwmon_simple_show(dev, buf, \ ++ _get_method); \ ++ } \ ++ static ssize_t _name##_store(struct device *dev, \ ++ struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++ { \ ++ return steamdeck_hwmon_simple_store(dev, buf, count, \ ++ _set_method, \ ++ _upper_limit); \ ++ } \ ++ static DEVICE_ATTR_RW(_name) + -+ if (!dev) -+ return -ENODEV; ++STEAMDECK_HWMON_ATTR_RW(max_battery_charge_level, "FCBL", "SFBL", 101); ++STEAMDECK_HWMON_ATTR_RW(max_battery_charge_rate, "CHGR", "GCHR", 101); + -+ if (dev->ready_for_capture) -+ return sprintf(buf, "capture\n"); -+ if (dev->ready_for_output) -+ return sprintf(buf, "output\n"); ++static struct attribute *steamdeck_hwmon_attributes[] = { ++ &dev_attr_max_battery_charge_level.attr, ++ &dev_attr_max_battery_charge_rate.attr, ++ NULL ++}; + -+ return -EAGAIN; -+} ++static const struct attribute_group steamdeck_hwmon_group = { ++ .attrs = steamdeck_hwmon_attributes, ++}; + -+static DEVICE_ATTR(state, S_IRUGO, attr_show_state, NULL); ++static const struct attribute_group *steamdeck_hwmon_groups[] = { ++ &steamdeck_hwmon_group, ++ NULL ++}; + -+static void v4l2loopback_remove_sysfs(struct video_device *vdev) ++static int steamdeck_hwmon_probe(struct platform_device *pdev) +{ -+#define V4L2_SYSFS_DESTROY(x) device_remove_file(&vdev->dev, &dev_attr_##x) -+ -+ if (vdev) { 
-+ V4L2_SYSFS_DESTROY(format); -+ V4L2_SYSFS_DESTROY(buffers); -+ V4L2_SYSFS_DESTROY(max_openers); -+ V4L2_SYSFS_DESTROY(state); -+ /* ... */ -+ } -+} ++ struct device *dev = &pdev->dev; ++ struct steamdeck_hwmon *sd; ++ struct device *hwmon; + -+static void v4l2loopback_create_sysfs(struct video_device *vdev) -+{ -+ int res = 0; ++ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); ++ if (!sd) ++ return -ENOMEM; + -+#define V4L2_SYSFS_CREATE(x) \ -+ res = device_create_file(&vdev->dev, &dev_attr_##x); \ -+ if (res < 0) \ -+ break -+ if (!vdev) -+ return; -+ do { -+ V4L2_SYSFS_CREATE(format); -+ V4L2_SYSFS_CREATE(buffers); -+ V4L2_SYSFS_CREATE(max_openers); -+ V4L2_SYSFS_CREATE(state); -+ /* ... */ -+ } while (0); ++ sd->adev = ACPI_COMPANION(dev->parent); ++ hwmon = devm_hwmon_device_register_with_info(dev, ++ "steamdeck_hwmon", ++ sd, ++ &steamdeck_hwmon_chip_info, ++ steamdeck_hwmon_groups); ++ if (IS_ERR(hwmon)) { ++ dev_err(dev, "Failed to register HWMON device"); ++ return PTR_ERR(hwmon); ++ } + -+ if (res >= 0) -+ return; -+ dev_err(&vdev->dev, "%s error: %d\n", __func__, res); ++ return 0; +} + -+/* Event APIs */ -+ -+#define V4L2LOOPBACK_EVENT_BASE (V4L2_EVENT_PRIVATE_START) -+#define V4L2LOOPBACK_EVENT_OFFSET 0x08E00000 -+#define V4L2_EVENT_PRI_CLIENT_USAGE \ -+ (V4L2LOOPBACK_EVENT_BASE + V4L2LOOPBACK_EVENT_OFFSET + 1) -+ -+struct v4l2_event_client_usage { -+ __u32 count; ++static const struct platform_device_id steamdeck_hwmon_id_table[] = { ++ { .name = STEAMDECK_HWMON_NAME }, ++ {} +}; ++MODULE_DEVICE_TABLE(platform, steamdeck_hwmon_id_table); + -+/* global module data */ -+/* find a device based on it's device-number (e.g. '3' for /dev/video3) */ -+struct v4l2loopback_lookup_cb_data { -+ int device_nr; -+ struct v4l2_loopback_device *device; ++static struct platform_driver steamdeck_hwmon_driver = { ++ .probe = steamdeck_hwmon_probe, ++ .driver = { ++ .name = STEAMDECK_HWMON_NAME, ++ }, ++ .id_table = steamdeck_hwmon_id_table, +}; -+static int v4l2loopback_lookup_cb(int id, void *ptr, void *data) -+{ -+ struct v4l2_loopback_device *device = ptr; -+ struct v4l2loopback_lookup_cb_data *cbdata = data; -+ if (cbdata && device && device->vdev) { -+ if (device->vdev->num == cbdata->device_nr) { -+ cbdata->device = device; -+ cbdata->device_nr = id; -+ return 1; -+ } -+ } -+ return 0; -+} -+static int v4l2loopback_lookup(int device_nr, -+ struct v4l2_loopback_device **device) ++module_platform_driver(steamdeck_hwmon_driver); ++ ++MODULE_AUTHOR("Andrey Smirnov "); ++MODULE_DESCRIPTION("Steam Deck EC sensors driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index b5cbb57ee5f6..a0f7fa1518c6 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -46,6 +46,7 @@ struct evdev_client { + struct fasync_struct *fasync; + struct evdev *evdev; + struct list_head node; ++ struct rcu_head rcu; + enum input_clock_type clk_type; + bool revoked; + unsigned long *evmasks[EV_CNT]; +@@ -368,13 +369,22 @@ static void evdev_attach_client(struct evdev *evdev, + spin_unlock(&evdev->client_lock); + } + ++static void evdev_reclaim_client(struct rcu_head *rp) +{ -+ struct v4l2loopback_lookup_cb_data data = { -+ .device_nr = device_nr, -+ .device = NULL, -+ }; -+ int err = idr_for_each(&v4l2loopback_index_idr, &v4l2loopback_lookup_cb, -+ &data); -+ if (1 == err) { -+ if (device) -+ *device = data.device; -+ return data.device_nr; -+ } -+ return -ENODEV; ++ struct evdev_client *client = container_of(rp, struct evdev_client, rcu); ++ unsigned int i; ++ 
for (i = 0; i < EV_CNT; ++i) ++ bitmap_free(client->evmasks[i]); ++ kvfree(client); +} -+static struct v4l2_loopback_device *v4l2loopback_cd2dev(struct device *cd) -+{ -+ struct video_device *loopdev = to_video_device(cd); -+ struct v4l2loopback_private *ptr = -+ (struct v4l2loopback_private *)video_get_drvdata(loopdev); -+ int nr = ptr->device_nr; + -+ return idr_find(&v4l2loopback_index_idr, nr); -+} + static void evdev_detach_client(struct evdev *evdev, + struct evdev_client *client) + { + spin_lock(&evdev->client_lock); + list_del_rcu(&client->node); + spin_unlock(&evdev->client_lock); +- synchronize_rcu(); ++ call_rcu(&client->rcu, evdev_reclaim_client); + } + + static int evdev_open_device(struct evdev *evdev) +@@ -427,7 +437,6 @@ static int evdev_release(struct inode *inode, struct file *file) + { + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; +- unsigned int i; + + mutex_lock(&evdev->mutex); + +@@ -439,11 +448,6 @@ static int evdev_release(struct inode *inode, struct file *file) + + evdev_detach_client(evdev, client); + +- for (i = 0; i < EV_CNT; ++i) +- bitmap_free(client->evmasks[i]); +- +- kvfree(client); +- + evdev_close_device(evdev); + + return 0; +@@ -486,7 +490,6 @@ static int evdev_open(struct inode *inode, struct file *file) + + err_free_client: + evdev_detach_client(evdev, client); +- kvfree(client); + return error; + } + +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index dfdfb59cc8b5..e0da70576167 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -83,6 +83,24 @@ + #define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273 + #define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274 + ++/* T2-Attached Devices */ ++/* MacbookAir8,1 (2018) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K 0x027a ++/* MacbookPro15,2 (2018) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132 0x027b ++/* MacbookPro15,1 (2018) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680 0x027c ++/* MacbookPro15,4 (2019) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213 0x027d ++/* MacbookPro16,2 (2020) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K 0x027e ++/* MacbookPro16,3 (2020) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223 0x027f ++/* MacbookAir9,1 (2020) */ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K 0x0280 ++/* MacbookPro16,1 (2019)*/ ++#define USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F 0x0340 ++ + #define BCM5974_DEVICE(prod) { \ + .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ + USB_DEVICE_ID_MATCH_INT_CLASS | \ +@@ -147,6 +165,22 @@ static const struct usb_device_id bcm5974_table[] = { + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO), + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS), ++ /* MacbookAir8,1 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K), ++ /* MacbookPro15,2 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132), ++ /* MacbookPro15,1 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680), ++ /* MacbookPro15,4 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213), ++ /* MacbookPro16,2 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K), ++ /* MacbookPro16,3 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223), ++ /* MacbookAir9,1 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K), ++ /* MacbookPro16,1 */ ++ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F), + /* Terminating entry */ + {} + }; +@@ -483,6 +517,110 @@ static const struct bcm5974_config 
bcm5974_config_table[] = { + { SN_COORD, -203, 6803 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } + }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J140K, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -6243, 6749 }, ++ { SN_COORD, -170, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J132, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -6243, 6749 }, ++ { SN_COORD, -170, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J680, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -7456, 7976 }, ++ { SN_COORD, -1768, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J213, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -6243, 6749 }, ++ { SN_COORD, -170, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J214K, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -7823, 8329 }, ++ { SN_COORD, -370, 7925 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J223, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -6243, 6749 }, ++ { SN_COORD, -170, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J230K, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -6243, 6749 }, ++ { SN_COORD, -170, 7685 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, ++ { ++ USB_DEVICE_ID_APPLE_WELLSPRINGT2_J152F, ++ 0, ++ 0, ++ HAS_INTEGRATED_BUTTON, ++ 0, sizeof(struct bt_data), ++ 0x83, DATAFORMAT(TYPE4), ++ { SN_PRESSURE, 0, 300 }, ++ { SN_WIDTH, 0, 2048 }, ++ { SN_COORD, -8916, 9918 }, ++ { SN_COORD, -1934, 9835 }, ++ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } ++ }, + {} + }; + +diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig +index b784bb74a837..fb3d45acfd9d 100644 +--- a/drivers/leds/Kconfig ++++ b/drivers/leds/Kconfig +@@ -959,6 +959,13 @@ config LEDS_ACER_A500 + This option enables support for the Power Button LED of + Acer Iconia Tab A500. 
+ ++config LEDS_STEAMDECK ++ tristate "LED support for Steam Deck" ++ depends on LEDS_CLASS && MFD_STEAMDECK ++ help ++ This option enabled support for the status LED (next to the ++ power button) on Steam Deck + -+static struct v4l2_loopback_device *v4l2loopback_getdevice(struct file *f) -+{ -+ struct v4l2loopback_private *ptr = video_drvdata(f); -+ int nr = ptr->device_nr; + source "drivers/leds/blink/Kconfig" + + comment "Flash and Torch LED drivers" +diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile +index 18afbb5a23ee..b0ff19d14419 100644 +--- a/drivers/leds/Makefile ++++ b/drivers/leds/Makefile +@@ -81,6 +81,7 @@ obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o + obj-$(CONFIG_LEDS_PWM) += leds-pwm.o + obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o + obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o ++obj-$(CONFIG_LEDS_STEAMDECK) += leds-steamdeck.o + obj-$(CONFIG_LEDS_SUN50I_A100) += leds-sun50i-a100.o + obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o + obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o +diff --git a/drivers/leds/leds-steamdeck.c b/drivers/leds/leds-steamdeck.c +new file mode 100644 +index 000000000000..56d31d2dd099 +--- /dev/null ++++ b/drivers/leds/leds-steamdeck.c +@@ -0,0 +1,74 @@ ++// SPDX-License-Identifier: GPL-2.0+ + -+ return idr_find(&v4l2loopback_index_idr, nr); -+} ++/* ++ * Steam Deck EC MFD LED cell driver ++ * ++ * Copyright (C) 2021-2022 Valve Corporation ++ * ++ */ + -+/* forward declarations */ -+static void client_usage_queue_event(struct video_device *vdev); -+static void init_buffers(struct v4l2_loopback_device *dev); -+static int allocate_buffers(struct v4l2_loopback_device *dev); -+static void free_buffers(struct v4l2_loopback_device *dev); -+static void try_free_buffers(struct v4l2_loopback_device *dev); -+static int allocate_timeout_image(struct v4l2_loopback_device *dev); -+static void check_timers(struct v4l2_loopback_device *dev); -+static const struct v4l2_file_operations v4l2_loopback_fops; -+static const struct v4l2_ioctl_ops v4l2_loopback_ioctl_ops; ++#include ++#include ++#include + -+/* Queue helpers */ -+/* next functions sets buffer flags and adjusts counters accordingly */ -+static inline void set_done(struct v4l2l_buffer *buffer) -+{ -+ buffer->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED; -+ buffer->buffer.flags |= V4L2_BUF_FLAG_DONE; -+} ++struct steamdeck_led { ++ struct acpi_device *adev; ++ struct led_classdev cdev; ++}; + -+static inline void set_queued(struct v4l2l_buffer *buffer) ++static int steamdeck_leds_brightness_set(struct led_classdev *cdev, ++ enum led_brightness value) +{ -+ buffer->buffer.flags &= ~V4L2_BUF_FLAG_DONE; -+ buffer->buffer.flags |= V4L2_BUF_FLAG_QUEUED; -+} ++ struct steamdeck_led *sd = container_of(cdev, struct steamdeck_led, ++ cdev); + -+static inline void unset_flags(struct v4l2l_buffer *buffer) -+{ -+ buffer->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED; -+ buffer->buffer.flags &= ~V4L2_BUF_FLAG_DONE; ++ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, ++ "CHBV", value))) ++ return -EIO; ++ ++ return 0; +} + -+/* V4L2 ioctl caps and params calls */ -+/* returns device capabilities -+ * called on VIDIOC_QUERYCAP -+ */ -+static int vidioc_querycap(struct file *file, void *priv, -+ struct v4l2_capability *cap) ++static int steamdeck_leds_probe(struct platform_device *pdev) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ int device_nr = -+ ((struct v4l2loopback_private *)video_get_drvdata(dev->vdev)) -+ ->device_nr; -+ __u32 capabilities = V4L2_CAP_STREAMING | 
V4L2_CAP_READWRITE; ++ struct device *dev = &pdev->dev; ++ struct steamdeck_led *sd; ++ int ret; + -+ strscpy(cap->driver, "v4l2 loopback", sizeof(cap->driver)); -+ snprintf(cap->card, sizeof(cap->card), "%s", dev->card_label); -+ snprintf(cap->bus_info, sizeof(cap->bus_info), -+ "platform:v4l2loopback-%03d", device_nr); ++ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); ++ if (!sd) ++ return -ENOMEM; + -+ if (dev->announce_all_caps) { -+ capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT; -+ } else { -+ if (dev->ready_for_capture) { -+ capabilities |= V4L2_CAP_VIDEO_CAPTURE; -+ } -+ if (dev->ready_for_output) { -+ capabilities |= V4L2_CAP_VIDEO_OUTPUT; -+ } -+ } ++ sd->adev = ACPI_COMPANION(dev->parent); + -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) -+ dev->vdev->device_caps = -+#endif /* >=linux-4.7.0 */ -+ cap->device_caps = cap->capabilities = capabilities; ++ sd->cdev.name = "status:white"; ++ sd->cdev.brightness_set_blocking = steamdeck_leds_brightness_set; ++ sd->cdev.max_brightness = 100; + -+ cap->capabilities |= V4L2_CAP_DEVICE_CAPS; ++ ret = devm_led_classdev_register(dev, &sd->cdev); ++ if (ret) { ++ dev_err(dev, "Failed to register LEDs device: %d\n", ret); ++ return ret; ++ } + -+ memset(cap->reserved, 0, sizeof(cap->reserved)); + return 0; +} + -+static int vidioc_enum_framesizes(struct file *file, void *fh, -+ struct v4l2_frmsizeenum *argp) -+{ -+ struct v4l2_loopback_device *dev; -+ -+ /* there can be only one... */ -+ if (argp->index) -+ return -EINVAL; -+ -+ dev = v4l2loopback_getdevice(file); -+ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { -+ /* format has already been negotiated -+ * cannot change during runtime -+ */ -+ if (argp->pixel_format != dev->pix_format.pixelformat) -+ return -EINVAL; -+ -+ argp->type = V4L2_FRMSIZE_TYPE_DISCRETE; -+ -+ argp->discrete.width = dev->pix_format.width; -+ argp->discrete.height = dev->pix_format.height; -+ } else { -+ /* if the format has not been negotiated yet, we accept anything -+ */ -+ if (NULL == format_by_fourcc(argp->pixel_format)) -+ return -EINVAL; -+ -+ if (dev->min_width == dev->max_width && -+ dev->min_height == dev->max_height) { -+ argp->type = V4L2_FRMSIZE_TYPE_DISCRETE; ++static const struct platform_device_id steamdeck_leds_id_table[] = { ++ { .name = "steamdeck-leds" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(platform, steamdeck_leds_id_table); + -+ argp->discrete.width = dev->min_width; -+ argp->discrete.height = dev->min_height; -+ } else { -+ argp->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; ++static struct platform_driver steamdeck_leds_driver = { ++ .probe = steamdeck_leds_probe, ++ .driver = { ++ .name = "steamdeck-leds", ++ }, ++ .id_table = steamdeck_leds_id_table, ++}; ++module_platform_driver(steamdeck_leds_driver); + -+ argp->stepwise.min_width = dev->min_width; -+ argp->stepwise.min_height = dev->min_height; ++MODULE_AUTHOR("Andrey Smirnov "); ++MODULE_DESCRIPTION("Steam Deck LEDs driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 1ae2c71bb383..784829ada178 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -3315,6 +3315,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) + goto bad; + } + ++#ifdef CONFIG_CACHY ++ set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags); ++ set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags); ++#endif + -+ argp->stepwise.max_width = dev->max_width; -+ argp->stepwise.max_height = dev->max_height; + ret = crypt_ctr_cipher(ti, argv[0], argv[1]); + if (ret < 0) + goto bad; +diff --git 
a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig +index 331b8e535e5b..80dabeebf580 100644 +--- a/drivers/media/v4l2-core/Kconfig ++++ b/drivers/media/v4l2-core/Kconfig +@@ -40,6 +40,11 @@ config VIDEO_TUNER + config V4L2_JPEG_HELPER + tristate + ++config V4L2_LOOPBACK ++ tristate "V4L2 loopback device" ++ help ++ V4L2 loopback device + -+ argp->stepwise.step_width = 1; -+ argp->stepwise.step_height = 1; -+ } -+ } -+ return 0; -+} + # Used by drivers that need v4l2-h264.ko + config V4L2_H264 + tristate +diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile +index 2177b9d63a8f..c179507cedc4 100644 +--- a/drivers/media/v4l2-core/Makefile ++++ b/drivers/media/v4l2-core/Makefile +@@ -33,5 +33,7 @@ obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o + obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o + obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o + ++obj-$(CONFIG_V4L2_LOOPBACK) += v4l2loopback.o + -+/* returns frameinterval (fps) for the set resolution -+ * called on VIDIOC_ENUM_FRAMEINTERVALS + obj-$(CONFIG_VIDEO_TUNER) += tuner.o + obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o +diff --git a/drivers/media/v4l2-core/v4l2loopback.c b/drivers/media/v4l2-core/v4l2loopback.c +new file mode 100644 +index 000000000000..25cb1beb26e5 +--- /dev/null ++++ b/drivers/media/v4l2-core/v4l2loopback.c +@@ -0,0 +1,3184 @@ ++/* -*- c-file-style: "linux" -*- */ ++/* ++ * v4l2loopback.c -- video4linux2 loopback driver ++ * ++ * Copyright (C) 2005-2009 Vasily Levin (vasaka@gmail.com) ++ * Copyright (C) 2010-2023 IOhannes m zmoelnig (zmoelnig@iem.at) ++ * Copyright (C) 2011 Stefan Diewald (stefan.diewald@mytum.de) ++ * Copyright (C) 2012 Anton Novikov (random.plant@gmail.com) ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * + */ -+static int vidioc_enum_frameintervals(struct file *file, void *fh, -+ struct v4l2_frmivalenum *argp) -+{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + -+ /* there can be only one... */ -+ if (argp->index) -+ return -EINVAL; ++#include ++#include "v4l2loopback.h" + -+ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { -+ if (argp->width != dev->pix_format.width || -+ argp->height != dev->pix_format.height || -+ argp->pixel_format != dev->pix_format.pixelformat) -+ return -EINVAL; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) ++#error This module is not supported on kernels before 4.0.0. 
++#endif
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
++#define strscpy strlcpy
++#endif
+
++#if defined(timer_setup) && defined(from_timer)
++#define HAVE_TIMER_SETUP
++#endif
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 7, 0)
++#define VFL_TYPE_VIDEO VFL_TYPE_GRABBER
++#endif
+
++#define V4L2LOOPBACK_VERSION_CODE \
++	KERNEL_VERSION(V4L2LOOPBACK_VERSION_MAJOR, V4L2LOOPBACK_VERSION_MINOR, \
++		       V4L2LOOPBACK_VERSION_BUGFIX)
+
++MODULE_DESCRIPTION("V4L2 loopback video device");
++MODULE_AUTHOR("Vasily Levin, "
++	      "IOhannes m zmoelnig <zmoelnig@iem.at>, "
++	      "Stefan Diewald, "
++	      "Anton Novikov, "
++	      "et al.");
++#ifdef SNAPSHOT_VERSION
++MODULE_VERSION(__stringify(SNAPSHOT_VERSION));
++#else
++MODULE_VERSION("" __stringify(V4L2LOOPBACK_VERSION_MAJOR) "." __stringify(
++	V4L2LOOPBACK_VERSION_MINOR) "." __stringify(V4L2LOOPBACK_VERSION_BUGFIX));
++#endif
++MODULE_LICENSE("GPL");
+
++/*
++ * helpers
++ */
++#define dprintk(fmt, args...) \
++	do { \
++		if (debug > 0) { \
++			printk(KERN_INFO "v4l2-loopback[" __stringify( \
++				       __LINE__) "], pid(%d): " fmt, \
++			       task_pid_nr(current), ##args); \
++		} \
++	} while (0)
+
++#define MARK() \
++	do { \
++		if (debug > 1) { \
++			printk(KERN_INFO "%s:%d[%s], pid(%d)\n", __FILE__, \
++			       __LINE__, __func__, task_pid_nr(current)); \
++		} \
++	} while (0)
+
++#define dprintkrw(fmt, args...) \
++	do { \
++		if (debug > 2) { \
++			printk(KERN_INFO "v4l2-loopback[" __stringify( \
++				       __LINE__) "], pid(%d): " fmt, \
++			       task_pid_nr(current), ##args); \
++		} \
++	} while (0)
+
++static inline void v4l2l_get_timestamp(struct v4l2_buffer *b)
++{
++	struct timespec64 ts;
++	ktime_get_ts64(&ts);
+
++	b->timestamp.tv_sec = ts.tv_sec;
++	b->timestamp.tv_usec = (ts.tv_nsec / NSEC_PER_USEC);
++	b->flags |= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
++}
+
++#if BITS_PER_LONG == 32
++#include <asm/div64.h> /* do_div() for 64bit division */
++static inline int v4l2l_mod64(const s64 A, const u32 B)
++{
++	u64 a = (u64)A;
++	u32 b = B;
+
++	if (A > 0)
++		return do_div(a, b);
++	a = -A;
++	return -do_div(a, b);
++}
++#else
++static inline int v4l2l_mod64(const s64 A, const u32 B)
++{
++	return A % B;
++}
++#endif
+
++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0)
++typedef unsigned __poll_t;
++#endif
+
++/* module constants
++ * can be overridden during the build process using something like
++ * make KCPPFLAGS="-DMAX_DEVICES=100"
++ */
+
++/* maximum number of v4l2loopback devices that can be created */
++#ifndef MAX_DEVICES
++#define MAX_DEVICES 8
++#endif
+
++/* whether the default is to announce capabilities exclusively or not */
++#ifndef V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS
++#define V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS 0
++#endif
+
++/* when a producer is considered to have gone stale */
++#ifndef MAX_TIMEOUT
++#define MAX_TIMEOUT (100 * 1000) /* in msecs */
++#endif
+
++/* max buffers that can be mapped, actually they
++ * are all mapped to max_buffers buffers */
++#ifndef MAX_BUFFERS
++#define MAX_BUFFERS 32
++#endif
+
++/* module parameters */
++static int debug = 0;
++module_param(debug, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(debug, "debugging level (higher values == more verbose)");
+
++#define V4L2LOOPBACK_DEFAULT_MAX_BUFFERS 2
++static int max_buffers = V4L2LOOPBACK_DEFAULT_MAX_BUFFERS;
++module_param(max_buffers, int, S_IRUGO);
++MODULE_PARM_DESC(max_buffers,
++		 "how many buffers should be allocated [DEFAULT: " __stringify(
++			 V4L2LOOPBACK_DEFAULT_MAX_BUFFERS) "]");
+
++/* how many times a device can be opened
++ * the per-module default value can be overridden on a per-device basis using
++ * the /sys/devices interface
++ *
++ * note that max_openers should be at least 2 in order to get a working system:
++ * one opener for the producer and one opener for the consumer
++ * however, we leave that to the user
++ */
++#define V4L2LOOPBACK_DEFAULT_MAX_OPENERS 10
++static int max_openers = V4L2LOOPBACK_DEFAULT_MAX_OPENERS;
++module_param(max_openers, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(
++	max_openers,
++	"how many users can open the loopback device [DEFAULT: " __stringify(
++		V4L2LOOPBACK_DEFAULT_MAX_OPENERS) "]");
+
++static int devices = -1;
++module_param(devices, int, 0);
++MODULE_PARM_DESC(devices, "how many devices should be created");
+
++static int video_nr[MAX_DEVICES] = { [0 ...(MAX_DEVICES - 1)] = -1 };
++module_param_array(video_nr, int, NULL, 
0444); ++MODULE_PARM_DESC(video_nr, ++ "video device numbers (-1=auto, 0=/dev/video0, etc.)"); + -+/* checks if it is OK to change to format fmt; -+ * if format is negotiated do not change it -+ * called on VIDIOC_TRY_FMT with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT -+ */ -+static int vidioc_try_fmt_out(struct file *file, void *priv, -+ struct v4l2_format *fmt) -+{ -+ int ret = 0; -+ if (!V4L2_TYPE_IS_OUTPUT(fmt->type)) -+ return -EINVAL; -+ ret = inner_try_setfmt(file, fmt); -+ if (-EBUSY == ret) -+ return 0; -+ return ret; -+} ++static char *card_label[MAX_DEVICES]; ++module_param_array(card_label, charp, NULL, 0000); ++MODULE_PARM_DESC(card_label, "card labels for each device"); + -+/* sets new output format, if possible; -+ * allocate data here because we do not know if it will be streaming or -+ * read/write IO -+ * called on VIDIOC_S_FMT with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT -+ */ -+static int vidioc_s_fmt_out(struct file *file, void *priv, -+ struct v4l2_format *fmt) -+{ -+ struct v4l2_loopback_device *dev; -+ int ret; -+ char buf[5]; -+ buf[4] = 0; -+ if (!V4L2_TYPE_IS_OUTPUT(fmt->type)) -+ return -EINVAL; -+ dev = v4l2loopback_getdevice(file); ++static bool exclusive_caps[MAX_DEVICES] = { ++ [0 ...(MAX_DEVICES - 1)] = V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS ++}; ++module_param_array(exclusive_caps, bool, NULL, 0444); ++/* FIXXME: wording */ ++MODULE_PARM_DESC( ++ exclusive_caps, ++ "whether to announce OUTPUT/CAPTURE capabilities exclusively or not [DEFAULT: " __stringify( ++ V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS) "]"); + -+ ret = inner_try_setfmt(file, fmt); -+ if (!ret) { -+ dev->pix_format = fmt->fmt.pix; -+ dev->pix_format_has_valid_sizeimage = -+ v4l2l_pix_format_has_valid_sizeimage(fmt); -+ dprintk("s_fmt_out(%d) %d...%d\n", ret, dev->ready_for_capture, -+ dev->pix_format.sizeimage); -+ dprintk("outFOURCC=%s\n", -+ fourcc2str(dev->pix_format.pixelformat, buf)); ++/* format specifications */ ++#define V4L2LOOPBACK_SIZE_MIN_WIDTH 2 ++#define V4L2LOOPBACK_SIZE_MIN_HEIGHT 1 ++#define V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH 8192 ++#define V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT 8192 + -+ if (!dev->ready_for_capture) { -+ dev->buffer_size = -+ PAGE_ALIGN(dev->pix_format.sizeimage); -+ // JMZ: TODO get rid of the next line -+ fmt->fmt.pix.sizeimage = dev->buffer_size; -+ ret = allocate_buffers(dev); -+ } -+ } -+ return ret; -+} ++#define V4L2LOOPBACK_SIZE_DEFAULT_WIDTH 640 ++#define V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT 480 + -+// #define V4L2L_OVERLAY -+#ifdef V4L2L_OVERLAY -+/* ------------------ OVERLAY ----------------------- */ -+/* currently unsupported */ -+/* GSTreamer's v4l2sink is buggy, as it requires the overlay to work -+ * while it should only require it, if overlay is requested -+ * once the gstreamer element is fixed, remove the overlay dummies -+ */ -+#warning OVERLAY dummies -+static int vidioc_g_fmt_overlay(struct file *file, void *priv, -+ struct v4l2_format *fmt) -+{ -+ return 0; -+} ++static int max_width = V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH; ++module_param(max_width, int, S_IRUGO); ++MODULE_PARM_DESC(max_width, ++ "maximum allowed frame width [DEFAULT: " __stringify( ++ V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH) "]"); ++static int max_height = V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT; ++module_param(max_height, int, S_IRUGO); ++MODULE_PARM_DESC(max_height, ++ "maximum allowed frame height [DEFAULT: " __stringify( ++ V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT) "]"); + -+static int vidioc_s_fmt_overlay(struct file *file, void *priv, -+ struct v4l2_format *fmt) -+{ -+ 
return 0; -+} -+#endif /* V4L2L_OVERLAY */ ++static DEFINE_IDR(v4l2loopback_index_idr); ++static DEFINE_MUTEX(v4l2loopback_ctl_mutex); + -+/* ------------------ PARAMs ----------------------- */ ++/* frame intervals */ ++#define V4L2LOOPBACK_FPS_MIN 0 ++#define V4L2LOOPBACK_FPS_MAX 1000 + -+/* get some data flow parameters, only capability, fps and readbuffers has -+ * effect on this driver -+ * called on VIDIOC_G_PARM -+ */ -+static int vidioc_g_parm(struct file *file, void *priv, -+ struct v4l2_streamparm *parm) -+{ -+ /* do not care about type of opener, hope these enums would always be -+ * compatible */ -+ struct v4l2_loopback_device *dev; -+ MARK(); ++/* control IDs */ ++#define V4L2LOOPBACK_CID_BASE (V4L2_CID_USER_BASE | 0xf000) ++#define CID_KEEP_FORMAT (V4L2LOOPBACK_CID_BASE + 0) ++#define CID_SUSTAIN_FRAMERATE (V4L2LOOPBACK_CID_BASE + 1) ++#define CID_TIMEOUT (V4L2LOOPBACK_CID_BASE + 2) ++#define CID_TIMEOUT_IMAGE_IO (V4L2LOOPBACK_CID_BASE + 3) + -+ dev = v4l2loopback_getdevice(file); -+ parm->parm.capture = dev->capture_param; -+ return 0; -+} ++static int v4l2loopback_s_ctrl(struct v4l2_ctrl *ctrl); ++static const struct v4l2_ctrl_ops v4l2loopback_ctrl_ops = { ++ .s_ctrl = v4l2loopback_s_ctrl, ++}; ++static const struct v4l2_ctrl_config v4l2loopback_ctrl_keepformat = { ++ // clang-format off ++ .ops = &v4l2loopback_ctrl_ops, ++ .id = CID_KEEP_FORMAT, ++ .name = "keep_format", ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .min = 0, ++ .max = 1, ++ .step = 1, ++ .def = 0, ++ // clang-format on ++}; ++static const struct v4l2_ctrl_config v4l2loopback_ctrl_sustainframerate = { ++ // clang-format off ++ .ops = &v4l2loopback_ctrl_ops, ++ .id = CID_SUSTAIN_FRAMERATE, ++ .name = "sustain_framerate", ++ .type = V4L2_CTRL_TYPE_BOOLEAN, ++ .min = 0, ++ .max = 1, ++ .step = 1, ++ .def = 0, ++ // clang-format on ++}; ++static const struct v4l2_ctrl_config v4l2loopback_ctrl_timeout = { ++ // clang-format off ++ .ops = &v4l2loopback_ctrl_ops, ++ .id = CID_TIMEOUT, ++ .name = "timeout", ++ .type = V4L2_CTRL_TYPE_INTEGER, ++ .min = 0, ++ .max = MAX_TIMEOUT, ++ .step = 1, ++ .def = 0, ++ // clang-format on ++}; ++static const struct v4l2_ctrl_config v4l2loopback_ctrl_timeoutimageio = { ++ // clang-format off ++ .ops = &v4l2loopback_ctrl_ops, ++ .id = CID_TIMEOUT_IMAGE_IO, ++ .name = "timeout_image_io", ++ .type = V4L2_CTRL_TYPE_BUTTON, ++ .min = 0, ++ .max = 1, ++ .step = 1, ++ .def = 0, ++ // clang-format on ++}; + -+/* get some data flow parameters, only capability, fps and readbuffers has -+ * effect on this driver -+ * called on VIDIOC_S_PARM -+ */ -+static int vidioc_s_parm(struct file *file, void *priv, -+ struct v4l2_streamparm *parm) -+{ -+ struct v4l2_loopback_device *dev; -+ int err = 0; -+ MARK(); ++/* module structures */ ++struct v4l2loopback_private { ++ int device_nr; ++}; + -+ dev = v4l2loopback_getdevice(file); -+ dprintk("vidioc_s_parm called frate=%d/%d\n", -+ parm->parm.capture.timeperframe.numerator, -+ parm->parm.capture.timeperframe.denominator); ++/* TODO(vasaka) use typenames which are common to kernel, but first find out if ++ * it is needed */ ++/* struct keeping state and settings of loopback device */ + -+ switch (parm->type) { -+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: -+ if ((err = set_timeperframe( -+ dev, &parm->parm.capture.timeperframe)) < 0) -+ return err; -+ break; -+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: -+ if ((err = set_timeperframe( -+ dev, &parm->parm.capture.timeperframe)) < 0) -+ return err; -+ break; -+ default: -+ return -1; -+ } ++struct v4l2l_buffer { ++ struct 
v4l2_buffer buffer; ++ struct list_head list_head; ++ int use_count; ++}; + -+ parm->parm.capture = dev->capture_param; -+ return 0; -+} ++struct v4l2_loopback_device { ++ struct v4l2_device v4l2_dev; ++ struct v4l2_ctrl_handler ctrl_handler; ++ struct video_device *vdev; ++ /* pixel and stream format */ ++ struct v4l2_pix_format pix_format; ++ bool pix_format_has_valid_sizeimage; ++ struct v4l2_captureparm capture_param; ++ unsigned long frame_jiffies; + -+#ifdef V4L2LOOPBACK_WITH_STD -+/* sets a tv standard, actually we do not need to handle this any special way -+ * added to support effecttv -+ * called on VIDIOC_S_STD -+ */ -+static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *_std) -+{ -+ v4l2_std_id req_std = 0, supported_std = 0; -+ const v4l2_std_id all_std = V4L2_STD_ALL, no_std = 0; ++ /* ctrls */ ++ int keep_format; /* CID_KEEP_FORMAT; stay ready_for_capture even when all ++ openers close() the device */ ++ int sustain_framerate; /* CID_SUSTAIN_FRAMERATE; duplicate frames to maintain ++ (close to) nominal framerate */ + -+ if (_std) { -+ req_std = *_std; -+ *_std = all_std; -+ } ++ /* buffers stuff */ ++ u8 *image; /* pointer to actual buffers data */ ++ unsigned long int imagesize; /* size of buffers data */ ++ int buffers_number; /* should not be big, 4 is a good choice */ ++ struct v4l2l_buffer buffers[MAX_BUFFERS]; /* inner driver buffers */ ++ int used_buffers; /* number of the actually used buffers */ ++ int max_openers; /* how many times can this device be opened */ + -+ /* we support everything in V4L2_STD_ALL, but not more... */ -+ supported_std = (all_std & req_std); -+ if (no_std == supported_std) -+ return -EINVAL; ++ s64 write_position; /* number of last written frame + 1 */ ++ struct list_head outbufs_list; /* buffers in output DQBUF order */ ++ int bufpos2index ++ [MAX_BUFFERS]; /* mapping of (read/write_position % used_buffers) ++ * to inner buffer index */ ++ long buffer_size; + -+ return 0; -+} ++ /* sustain_framerate stuff */ ++ struct timer_list sustain_timer; ++ unsigned int reread_count; + -+/* gets a fake video standard -+ * called on VIDIOC_G_STD -+ */ -+static int vidioc_g_std(struct file *file, void *fh, v4l2_std_id *norm) -+{ -+ if (norm) -+ *norm = V4L2_STD_ALL; -+ return 0; -+} -+/* gets a fake video standard -+ * called on VIDIOC_QUERYSTD -+ */ -+static int vidioc_querystd(struct file *file, void *fh, v4l2_std_id *norm) -+{ -+ if (norm) -+ *norm = V4L2_STD_ALL; -+ return 0; -+} -+#endif /* V4L2LOOPBACK_WITH_STD */ ++ /* timeout stuff */ ++ unsigned long timeout_jiffies; /* CID_TIMEOUT; 0 means disabled */ ++ int timeout_image_io; /* CID_TIMEOUT_IMAGE_IO; next opener will ++ * read/write to timeout_image */ ++ u8 *timeout_image; /* copy of it will be captured when timeout passes */ ++ struct v4l2l_buffer timeout_image_buffer; ++ struct timer_list timeout_timer; ++ int timeout_happened; + -+static int v4l2loopback_set_ctrl(struct v4l2_loopback_device *dev, u32 id, -+ s64 val) -+{ -+ switch (id) { -+ case CID_KEEP_FORMAT: -+ if (val < 0 || val > 1) -+ return -EINVAL; -+ dev->keep_format = val; -+ try_free_buffers( -+ dev); /* will only free buffers if !keep_format */ -+ break; -+ case CID_SUSTAIN_FRAMERATE: -+ if (val < 0 || val > 1) -+ return -EINVAL; -+ spin_lock_bh(&dev->lock); -+ dev->sustain_framerate = val; -+ check_timers(dev); -+ spin_unlock_bh(&dev->lock); -+ break; -+ case CID_TIMEOUT: -+ if (val < 0 || val > MAX_TIMEOUT) -+ return -EINVAL; -+ spin_lock_bh(&dev->lock); -+ dev->timeout_jiffies = msecs_to_jiffies(val); -+ 
check_timers(dev); -+ spin_unlock_bh(&dev->lock); -+ allocate_timeout_image(dev); -+ break; -+ case CID_TIMEOUT_IMAGE_IO: -+ dev->timeout_image_io = 1; -+ break; -+ default: -+ return -EINVAL; -+ } -+ return 0; -+} ++ /* sync stuff */ ++ atomic_t open_count; + -+static int v4l2loopback_s_ctrl(struct v4l2_ctrl *ctrl) -+{ -+ struct v4l2_loopback_device *dev = container_of( -+ ctrl->handler, struct v4l2_loopback_device, ctrl_handler); -+ return v4l2loopback_set_ctrl(dev, ctrl->id, ctrl->val); -+} ++ int ready_for_capture; /* set to the number of writers that opened the ++ * device and negotiated format. */ ++ int ready_for_output; /* set to true when no writer is currently attached ++ * this differs slightly from !ready_for_capture, ++ * e.g. when using fallback images */ ++ int active_readers; /* increase if any reader starts streaming */ ++ int announce_all_caps; /* set to false, if device caps (OUTPUT/CAPTURE) ++ * should only be announced if the resp. "ready" ++ * flag is set; default=TRUE */ + -+/* returns set of device outputs, in our case there is only one -+ * called on VIDIOC_ENUMOUTPUT -+ */ -+static int vidioc_enum_output(struct file *file, void *fh, -+ struct v4l2_output *outp) -+{ -+ __u32 index = outp->index; -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ MARK(); ++ int min_width, max_width; ++ int min_height, max_height; + -+ if (!dev->announce_all_caps && !dev->ready_for_output) -+ return -ENOTTY; ++ char card_label[32]; + -+ if (0 != index) -+ return -EINVAL; ++ wait_queue_head_t read_event; ++ spinlock_t lock, list_lock; ++}; + -+ /* clear all data (including the reserved fields) */ -+ memset(outp, 0, sizeof(*outp)); ++/* types of opener shows what opener wants to do with loopback */ ++enum opener_type { ++ // clang-format off ++ UNNEGOTIATED = 0, ++ READER = 1, ++ WRITER = 2, ++ // clang-format on ++}; + -+ outp->index = index; -+ strscpy(outp->name, "loopback in", sizeof(outp->name)); -+ outp->type = V4L2_OUTPUT_TYPE_ANALOG; -+ outp->audioset = 0; -+ outp->modulator = 0; -+#ifdef V4L2LOOPBACK_WITH_STD -+ outp->std = V4L2_STD_ALL; -+#ifdef V4L2_OUT_CAP_STD -+ outp->capabilities |= V4L2_OUT_CAP_STD; -+#endif /* V4L2_OUT_CAP_STD */ -+#endif /* V4L2LOOPBACK_WITH_STD */ ++/* struct keeping state and type of opener */ ++struct v4l2_loopback_opener { ++ enum opener_type type; ++ s64 read_position; /* number of last processed frame + 1 or ++ * write_position - 1 if reader went out of sync */ ++ unsigned int reread_count; ++ struct v4l2_buffer *buffers; ++ int buffers_number; /* should not be big, 4 is a good choice */ ++ int timeout_image_io; + -+ return 0; -+} ++ struct v4l2_fh fh; ++}; + -+/* which output is currently active, -+ * called on VIDIOC_G_OUTPUT -+ */ -+static int vidioc_g_output(struct file *file, void *fh, unsigned int *i) ++#define fh_to_opener(ptr) container_of((ptr), struct v4l2_loopback_opener, fh) ++ ++/* this is heavily inspired by the bttv driver found in the linux kernel */ ++struct v4l2l_format { ++ char *name; ++ int fourcc; /* video4linux 2 */ ++ int depth; /* bit/pixel */ ++ int flags; ++}; ++/* set the v4l2l_format.flags to PLANAR for non-packed formats */ ++#define FORMAT_FLAGS_PLANAR 0x01 ++#define FORMAT_FLAGS_COMPRESSED 0x02 ++ ++#include "v4l2loopback_formats.h" ++ ++#ifndef V4L2_TYPE_IS_CAPTURE ++#define V4L2_TYPE_IS_CAPTURE(type) \ ++ ((type) == V4L2_BUF_TYPE_VIDEO_CAPTURE || \ ++ (type) == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) ++#endif /* V4L2_TYPE_IS_CAPTURE */ ++#ifndef V4L2_TYPE_IS_OUTPUT ++#define 
V4L2_TYPE_IS_OUTPUT(type) \ ++ ((type) == V4L2_BUF_TYPE_VIDEO_OUTPUT || \ ++ (type) == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ++#endif /* V4L2_TYPE_IS_OUTPUT */ ++ ++/* whether the format can be changed */ ++/* the format is fixated if we ++ - have writers (ready_for_capture>0) ++ - and/or have readers (active_readers>0) ++*/ ++#define V4L2LOOPBACK_IS_FIXED_FMT(device) \ ++ (device->ready_for_capture > 0 || device->active_readers > 0 || \ ++ device->keep_format) ++ ++static const unsigned int FORMATS = ARRAY_SIZE(formats); ++ ++static char *fourcc2str(unsigned int fourcc, char buf[4]) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ if (!dev->announce_all_caps && !dev->ready_for_output) -+ return -ENOTTY; -+ if (i) -+ *i = 0; -+ return 0; ++ buf[0] = (fourcc >> 0) & 0xFF; ++ buf[1] = (fourcc >> 8) & 0xFF; ++ buf[2] = (fourcc >> 16) & 0xFF; ++ buf[3] = (fourcc >> 24) & 0xFF; ++ ++ return buf; +} + -+/* set output, can make sense if we have more than one video src, -+ * called on VIDIOC_S_OUTPUT -+ */ -+static int vidioc_s_output(struct file *file, void *fh, unsigned int i) ++static const struct v4l2l_format *format_by_fourcc(int fourcc) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ if (!dev->announce_all_caps && !dev->ready_for_output) -+ return -ENOTTY; ++ unsigned int i; + -+ if (i) -+ return -EINVAL; ++ for (i = 0; i < FORMATS; i++) { ++ if (formats[i].fourcc == fourcc) ++ return formats + i; ++ } + -+ return 0; ++ dprintk("unsupported format '%c%c%c%c'\n", (fourcc >> 0) & 0xFF, ++ (fourcc >> 8) & 0xFF, (fourcc >> 16) & 0xFF, ++ (fourcc >> 24) & 0xFF); ++ return NULL; +} + -+/* returns set of device inputs, in our case there is only one, -+ * but later I may add more -+ * called on VIDIOC_ENUMINPUT -+ */ -+static int vidioc_enum_input(struct file *file, void *fh, -+ struct v4l2_input *inp) ++static void pix_format_set_size(struct v4l2_pix_format *f, ++ const struct v4l2l_format *fmt, ++ unsigned int width, unsigned int height) +{ -+ struct v4l2_loopback_device *dev; -+ __u32 index = inp->index; -+ MARK(); ++ f->width = width; ++ f->height = height; + -+ if (0 != index) -+ return -EINVAL; ++ if (fmt->flags & FORMAT_FLAGS_PLANAR) { ++ f->bytesperline = width; /* Y plane */ ++ f->sizeimage = (width * height * fmt->depth) >> 3; ++ } else if (fmt->flags & FORMAT_FLAGS_COMPRESSED) { ++ /* doesn't make sense for compressed formats */ ++ f->bytesperline = 0; ++ f->sizeimage = (width * height * fmt->depth) >> 3; ++ } else { ++ f->bytesperline = (width * fmt->depth) >> 3; ++ f->sizeimage = height * f->bytesperline; ++ } ++} + -+ /* clear all data (including the reserved fields) */ -+ memset(inp, 0, sizeof(*inp)); ++static int v4l2l_fill_format(struct v4l2_format *fmt, int capture, ++ const u32 minwidth, const u32 maxwidth, ++ const u32 minheight, const u32 maxheight) ++{ ++ u32 width = fmt->fmt.pix.width, height = fmt->fmt.pix.height; ++ u32 pixelformat = fmt->fmt.pix.pixelformat; ++ struct v4l2_format fmt0 = *fmt; ++ u32 bytesperline = 0, sizeimage = 0; ++ if (!width) ++ width = V4L2LOOPBACK_SIZE_DEFAULT_WIDTH; ++ if (!height) ++ height = V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT; ++ if (width < minwidth) ++ width = minwidth; ++ if (width > maxwidth) ++ width = maxwidth; ++ if (height < minheight) ++ height = minheight; ++ if (height > maxheight) ++ height = maxheight; + -+ inp->index = index; -+ strscpy(inp->name, "loopback", sizeof(inp->name)); -+ inp->type = V4L2_INPUT_TYPE_CAMERA; -+ inp->audioset = 0; -+ inp->tuner = 0; -+ inp->status = 0; ++ /* sets: 
width,height,pixelformat,bytesperline,sizeimage */ ++ if (!(V4L2_TYPE_IS_MULTIPLANAR(fmt0.type))) { ++ fmt0.fmt.pix.bytesperline = 0; ++ fmt0.fmt.pix.sizeimage = 0; ++ } + -+#ifdef V4L2LOOPBACK_WITH_STD -+ inp->std = V4L2_STD_ALL; -+#ifdef V4L2_IN_CAP_STD -+ inp->capabilities |= V4L2_IN_CAP_STD; ++ if (0) { ++ ; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) ++ } else if (!v4l2_fill_pixfmt(&fmt0.fmt.pix, pixelformat, width, ++ height)) { ++ ; ++ } else if (!v4l2_fill_pixfmt_mp(&fmt0.fmt.pix_mp, pixelformat, width, ++ height)) { ++ ; +#endif -+#endif /* V4L2LOOPBACK_WITH_STD */ ++ } else { ++ const struct v4l2l_format *format = ++ format_by_fourcc(pixelformat); ++ if (!format) ++ return -EINVAL; ++ pix_format_set_size(&fmt0.fmt.pix, format, width, height); ++ fmt0.fmt.pix.pixelformat = format->fourcc; ++ } + -+ dev = v4l2loopback_getdevice(file); -+ if (!dev->ready_for_capture) { -+ inp->status |= V4L2_IN_ST_NO_SIGNAL; ++ if (V4L2_TYPE_IS_MULTIPLANAR(fmt0.type)) { ++ *fmt = fmt0; ++ ++ if ((fmt->fmt.pix_mp.colorspace == V4L2_COLORSPACE_DEFAULT) || ++ (fmt->fmt.pix_mp.colorspace > V4L2_COLORSPACE_DCI_P3)) ++ fmt->fmt.pix_mp.colorspace = V4L2_COLORSPACE_SRGB; ++ if (V4L2_FIELD_ANY == fmt->fmt.pix_mp.field) ++ fmt->fmt.pix_mp.field = V4L2_FIELD_NONE; ++ if (capture) ++ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ++ else ++ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; ++ } else { ++ bytesperline = fmt->fmt.pix.bytesperline; ++ sizeimage = fmt->fmt.pix.sizeimage; ++ ++ *fmt = fmt0; ++ ++ if (!fmt->fmt.pix.bytesperline) ++ fmt->fmt.pix.bytesperline = bytesperline; ++ if (!fmt->fmt.pix.sizeimage) ++ fmt->fmt.pix.sizeimage = sizeimage; ++ ++ if ((fmt->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) || ++ (fmt->fmt.pix.colorspace > V4L2_COLORSPACE_DCI_P3)) ++ fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; ++ if (V4L2_FIELD_ANY == fmt->fmt.pix.field) ++ fmt->fmt.pix.field = V4L2_FIELD_NONE; ++ if (capture) ++ fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ++ else ++ fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; + } + + return 0; +} + -+/* which input is currently active, -+ * called on VIDIOC_G_INPUT -+ */ -+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) ++/* Checks if v4l2l_fill_format() has set a valid, fixed sizeimage val. */ ++static bool v4l2l_pix_format_has_valid_sizeimage(struct v4l2_format *fmt) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ if (!dev->announce_all_caps && !dev->ready_for_capture) -+ return -ENOTTY; -+ if (i) -+ *i = 0; -+ return 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 2, 0) ++ const struct v4l2_format_info *info; ++ ++ info = v4l2_format_info(fmt->fmt.pix.pixelformat); ++ if (info && info->mem_planes == 1) ++ return true; ++#endif ++ ++ return false; +} + -+/* set input, can make sense if we have more than one video src, -+ * called on VIDIOC_S_INPUT -+ */ -+static int vidioc_s_input(struct file *file, void *fh, unsigned int i) ++static int pix_format_eq(const struct v4l2_pix_format *ref, ++ const struct v4l2_pix_format *tgt, int strict) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ if (!dev->announce_all_caps && !dev->ready_for_capture) -+ return -ENOTTY; -+ if (i == 0) -+ return 0; -+ return -EINVAL; ++ /* check if the two formats are equivalent. 
++ * ANY fields are handled gracefully ++ */ ++#define _pix_format_eq0(x) \ ++ if (ref->x != tgt->x) \ ++ result = 0 ++#define _pix_format_eq1(x, def) \ ++ do { \ ++ if ((def != tgt->x) && (ref->x != tgt->x)) { \ ++ printk(KERN_INFO #x " failed"); \ ++ result = 0; \ ++ } \ ++ } while (0) ++ int result = 1; ++ _pix_format_eq0(width); ++ _pix_format_eq0(height); ++ _pix_format_eq0(pixelformat); ++ if (!strict) ++ return result; ++ _pix_format_eq1(field, V4L2_FIELD_ANY); ++ _pix_format_eq0(bytesperline); ++ _pix_format_eq0(sizeimage); ++ _pix_format_eq1(colorspace, V4L2_COLORSPACE_DEFAULT); ++ return result; +} + -+/* --------------- V4L2 ioctl buffer related calls ----------------- */ -+ -+/* negotiate buffer type -+ * only mmap streaming supported -+ * called on VIDIOC_REQBUFS -+ */ -+static int vidioc_reqbufs(struct file *file, void *fh, -+ struct v4l2_requestbuffers *b) ++static struct v4l2_loopback_device *v4l2loopback_getdevice(struct file *f); ++static int inner_try_setfmt(struct file *file, struct v4l2_format *fmt) +{ ++ int capture = V4L2_TYPE_IS_CAPTURE(fmt->type); + struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ int i; -+ MARK(); ++ int needschange = 0; ++ char buf[5]; ++ buf[4] = 0; + + dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(fh); -+ -+ dprintk("reqbufs: %d\t%d=%d\n", b->memory, b->count, -+ dev->buffers_number); + -+ if (opener->timeout_image_io) { -+ dev->timeout_image_io = 0; -+ if (b->memory != V4L2_MEMORY_MMAP) -+ return -EINVAL; -+ b->count = 2; -+ return 0; ++ needschange = !(pix_format_eq(&dev->pix_format, &fmt->fmt.pix, 0)); ++ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { ++ fmt->fmt.pix = dev->pix_format; ++ if (needschange) { ++ if (dev->active_readers > 0 && capture) { ++ /* cannot call fmt_cap while there are readers */ ++ return -EBUSY; ++ } ++ if (dev->ready_for_capture > 0 && !capture) { ++ /* cannot call fmt_out while there are writers */ ++ return -EBUSY; ++ } ++ } + } -+ -+ if (V4L2_TYPE_IS_OUTPUT(b->type) && (!dev->ready_for_output)) { -+ return -EBUSY; ++ if (v4l2l_fill_format(fmt, capture, dev->min_width, dev->max_width, ++ dev->min_height, dev->max_height) != 0) { ++ return -EINVAL; + } + -+ init_buffers(dev); -+ switch (b->memory) { -+ case V4L2_MEMORY_MMAP: -+ /* do nothing here, buffers are always allocated */ -+ if (b->count < 1 || dev->buffers_number < 1) -+ return 0; -+ -+ if (b->count > dev->buffers_number) -+ b->count = dev->buffers_number; ++ if (1) { ++ char buf[5]; ++ buf[4] = 0; ++ dprintk("capFOURCC=%s\n", ++ fourcc2str(dev->pix_format.pixelformat, buf)); ++ } ++ return 0; ++} + -+ /* make sure that outbufs_list contains buffers from 0 to used_buffers-1 -+ * actually, it will have been already populated via v4l2_loopback_init() -+ * at this point */ -+ if (list_empty(&dev->outbufs_list)) { -+ for (i = 0; i < dev->used_buffers; ++i) -+ list_add_tail(&dev->buffers[i].list_head, -+ &dev->outbufs_list); -+ } ++static int set_timeperframe(struct v4l2_loopback_device *dev, ++ struct v4l2_fract *tpf) ++{ ++ if ((tpf->denominator < 1) || (tpf->numerator < 1)) { ++ return -EINVAL; ++ } ++ dev->capture_param.timeperframe = *tpf; ++ dev->frame_jiffies = max(1UL, msecs_to_jiffies(1000) * tpf->numerator / ++ tpf->denominator); ++ return 0; ++} + -+ /* also, if dev->used_buffers is going to be decreased, we should remove -+ * out-of-range buffers from outbufs_list, and fix bufpos2index mapping */ -+ if (b->count < dev->used_buffers) { -+ struct v4l2l_buffer *pos, *n; ++static struct v4l2_loopback_device 
*v4l2loopback_cd2dev(struct device *cd); + -+ list_for_each_entry_safe(pos, n, &dev->outbufs_list, -+ list_head) { -+ if (pos->buffer.index >= b->count) -+ list_del(&pos->list_head); -+ } ++/* device attributes */ ++/* available via sysfs: /sys/devices/virtual/video4linux/video* */ + -+ /* after we update dev->used_buffers, buffers in outbufs_list will -+ * correspond to dev->write_position + [0;b->count-1] range */ -+ i = v4l2l_mod64(dev->write_position, b->count); -+ list_for_each_entry(pos, &dev->outbufs_list, -+ list_head) { -+ dev->bufpos2index[i % b->count] = -+ pos->buffer.index; -+ ++i; -+ } -+ } ++static ssize_t attr_show_format(struct device *cd, ++ struct device_attribute *attr, char *buf) ++{ ++ /* gets the current format as "FOURCC:WxH@f/s", e.g. "YUYV:320x240@1000/30" */ ++ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); ++ const struct v4l2_fract *tpf; ++ char buf4cc[5], buf_fps[32]; + -+ opener->buffers_number = b->count; -+ if (opener->buffers_number < dev->used_buffers) -+ dev->used_buffers = opener->buffers_number; ++ if (!dev || !V4L2LOOPBACK_IS_FIXED_FMT(dev)) + return 0; -+ default: -+ return -EINVAL; -+ } ++ tpf = &dev->capture_param.timeperframe; ++ ++ fourcc2str(dev->pix_format.pixelformat, buf4cc); ++ buf4cc[4] = 0; ++ if (tpf->numerator == 1) ++ snprintf(buf_fps, sizeof(buf_fps), "%d", tpf->denominator); ++ else ++ snprintf(buf_fps, sizeof(buf_fps), "%d/%d", tpf->denominator, ++ tpf->numerator); ++ return sprintf(buf, "%4s:%dx%d@%s\n", buf4cc, dev->pix_format.width, ++ dev->pix_format.height, buf_fps); +} + -+/* returns buffer asked for; -+ * give app as many buffers as it wants, if it less than MAX, -+ * but map them in our inner buffers -+ * called on VIDIOC_QUERYBUF -+ */ -+static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) ++static ssize_t attr_store_format(struct device *cd, ++ struct device_attribute *attr, const char *buf, ++ size_t len) +{ -+ enum v4l2_buf_type type; -+ int index; -+ struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ -+ MARK(); ++ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); ++ int fps_num = 0, fps_den = 1; + -+ type = b->type; -+ index = b->index; -+ dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(fh); ++ if (!dev) ++ return -ENODEV; + -+ if ((b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && -+ (b->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) { -+ return -EINVAL; ++ /* only fps changing is supported */ ++ if (sscanf(buf, "@%d/%d", &fps_num, &fps_den) > 0) { ++ struct v4l2_fract f = { .numerator = fps_den, ++ .denominator = fps_num }; ++ int err = 0; ++ if ((err = set_timeperframe(dev, &f)) < 0) ++ return err; ++ return len; + } -+ if (b->index > max_buffers) -+ return -EINVAL; ++ return -EINVAL; ++} + -+ if (opener->timeout_image_io) -+ *b = dev->timeout_image_buffer.buffer; -+ else -+ *b = dev->buffers[b->index % dev->used_buffers].buffer; ++static DEVICE_ATTR(format, S_IRUGO | S_IWUSR, attr_show_format, ++ attr_store_format); + -+ b->type = type; -+ b->index = index; -+ dprintkrw("buffer type: %d (of %d with size=%ld)\n", b->memory, -+ dev->buffers_number, dev->buffer_size); ++static ssize_t attr_show_buffers(struct device *cd, ++ struct device_attribute *attr, char *buf) ++{ ++ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); + -+ /* Hopefully fix 'DQBUF return bad index if queue bigger then 2 for capture' -+ https://github.com/umlaeute/v4l2loopback/issues/60 */ -+ b->flags &= ~V4L2_BUF_FLAG_DONE; -+ b->flags |= V4L2_BUF_FLAG_QUEUED; ++ if 
(!dev) ++ return -ENODEV; + -+ return 0; ++ return sprintf(buf, "%d\n", dev->used_buffers); +} + -+static void buffer_written(struct v4l2_loopback_device *dev, -+ struct v4l2l_buffer *buf) -+{ -+ del_timer_sync(&dev->sustain_timer); -+ del_timer_sync(&dev->timeout_timer); ++static DEVICE_ATTR(buffers, S_IRUGO, attr_show_buffers, NULL); + -+ spin_lock_bh(&dev->list_lock); -+ list_move_tail(&buf->list_head, &dev->outbufs_list); -+ spin_unlock_bh(&dev->list_lock); ++static ssize_t attr_show_maxopeners(struct device *cd, ++ struct device_attribute *attr, char *buf) ++{ ++ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); + -+ spin_lock_bh(&dev->lock); -+ dev->bufpos2index[v4l2l_mod64(dev->write_position, dev->used_buffers)] = -+ buf->buffer.index; -+ ++dev->write_position; -+ dev->reread_count = 0; ++ if (!dev) ++ return -ENODEV; + -+ check_timers(dev); -+ spin_unlock_bh(&dev->lock); ++ return sprintf(buf, "%d\n", dev->max_openers); +} + -+/* put buffer to queue -+ * called on VIDIOC_QBUF -+ */ -+static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) ++static ssize_t attr_store_maxopeners(struct device *cd, ++ struct device_attribute *attr, ++ const char *buf, size_t len) +{ -+ struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ struct v4l2l_buffer *b; -+ int index; ++ struct v4l2_loopback_device *dev = NULL; ++ unsigned long curr = 0; + -+ dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(fh); ++ if (kstrtoul(buf, 0, &curr)) ++ return -EINVAL; + -+ if (buf->index > max_buffers) ++ dev = v4l2loopback_cd2dev(cd); ++ if (!dev) ++ return -ENODEV; ++ ++ if (dev->max_openers == curr) ++ return len; ++ ++ if (curr > __INT_MAX__ || dev->open_count.counter > curr) { ++ /* request to limit to less openers as are currently attached to us */ + return -EINVAL; -+ if (opener->timeout_image_io) -+ return 0; ++ } + -+ index = buf->index % dev->used_buffers; -+ b = &dev->buffers[index]; ++ dev->max_openers = (int)curr; + -+ switch (buf->type) { -+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: -+ dprintkrw( -+ "qbuf(CAPTURE)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", -+ index, buf->index, buf, buf->type, buf->bytesused, -+ buf->length, buf->flags, buf->field, -+ (long long)buf->timestamp.tv_sec, -+ (long int)buf->timestamp.tv_usec, buf->sequence); -+ set_queued(b); -+ return 0; -+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: -+ dprintkrw( -+ "qbuf(OUTPUT)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", -+ index, buf->index, buf, buf->type, buf->bytesused, -+ buf->length, buf->flags, buf->field, -+ (long long)buf->timestamp.tv_sec, -+ (long int)buf->timestamp.tv_usec, buf->sequence); -+ if ((!(b->buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_COPY)) && -+ (buf->timestamp.tv_sec == 0 && buf->timestamp.tv_usec == 0)) -+ v4l2l_get_timestamp(&b->buffer); -+ else { -+ b->buffer.timestamp = buf->timestamp; -+ b->buffer.flags |= V4L2_BUF_FLAG_TIMESTAMP_COPY; -+ } -+ if (dev->pix_format_has_valid_sizeimage) { -+ if (buf->bytesused >= dev->pix_format.sizeimage) { -+ b->buffer.bytesused = dev->pix_format.sizeimage; -+ } else { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) -+ dev_warn_ratelimited( -+ &dev->vdev->dev, -+#else -+ dprintkrw( -+#endif -+ "warning queued output buffer bytesused too small %d < %d\n", -+ buf->bytesused, -+ dev->pix_format.sizeimage); -+ b->buffer.bytesused = buf->bytesused; -+ } -+ } else { -+ b->buffer.bytesused = buf->bytesused; -+ } 
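++ /* NOTE: max_openers is declared S_IRUGO | S_IWUSR, so the
++ * attr_store_maxopeners handler is reachable from the shell; it
++ * rejects values above INT_MAX or below the number of openers
++ * currently attached. Illustrative usage (device number assumed):
++ * echo 8 > /sys/devices/virtual/video4linux/video0/max_openers
++ */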
-+ -+ set_done(b); -+ buffer_written(dev, b); -+ -+ /* Hopefully fix 'DQBUF return bad index if queue bigger then 2 for capture' -+ https://github.com/umlaeute/v4l2loopback/issues/60 */ -+ buf->flags &= ~V4L2_BUF_FLAG_DONE; -+ buf->flags |= V4L2_BUF_FLAG_QUEUED; -+ -+ wake_up_all(&dev->read_event); -+ return 0; -+ default: -+ return -EINVAL; -+ } ++ return len; +} + -+static int can_read(struct v4l2_loopback_device *dev, -+ struct v4l2_loopback_opener *opener) -+{ -+ int ret; -+ -+ spin_lock_bh(&dev->lock); -+ check_timers(dev); -+ ret = dev->write_position > opener->read_position || -+ dev->reread_count > opener->reread_count || dev->timeout_happened; -+ spin_unlock_bh(&dev->lock); -+ return ret; -+} ++static DEVICE_ATTR(max_openers, S_IRUGO | S_IWUSR, attr_show_maxopeners, ++ attr_store_maxopeners); + -+static int get_capture_buffer(struct file *file) ++static ssize_t attr_show_state(struct device *cd, struct device_attribute *attr, ++ char *buf) +{ -+ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); -+ struct v4l2_loopback_opener *opener = fh_to_opener(file->private_data); -+ int pos, ret; -+ int timeout_happened; ++ struct v4l2_loopback_device *dev = v4l2loopback_cd2dev(cd); + -+ if ((file->f_flags & O_NONBLOCK) && -+ (dev->write_position <= opener->read_position && -+ dev->reread_count <= opener->reread_count && -+ !dev->timeout_happened)) -+ return -EAGAIN; -+ wait_event_interruptible(dev->read_event, can_read(dev, opener)); ++ if (!dev) ++ return -ENODEV; + -+ spin_lock_bh(&dev->lock); -+ if (dev->write_position == opener->read_position) { -+ if (dev->reread_count > opener->reread_count + 2) -+ opener->reread_count = dev->reread_count - 1; -+ ++opener->reread_count; -+ pos = v4l2l_mod64(opener->read_position + dev->used_buffers - 1, -+ dev->used_buffers); -+ } else { -+ opener->reread_count = 0; -+ if (dev->write_position > -+ opener->read_position + dev->used_buffers) -+ opener->read_position = dev->write_position - 1; -+ pos = v4l2l_mod64(opener->read_position, dev->used_buffers); -+ ++opener->read_position; -+ } -+ timeout_happened = dev->timeout_happened; -+ dev->timeout_happened = 0; -+ spin_unlock_bh(&dev->lock); ++ if (dev->ready_for_capture) ++ return sprintf(buf, "capture\n"); ++ if (dev->ready_for_output) ++ return sprintf(buf, "output\n"); + -+ ret = dev->bufpos2index[pos]; -+ if (timeout_happened) { -+ if (ret < 0) { -+ dprintk("trying to return not mapped buf[%d]\n", ret); -+ return -EFAULT; -+ } -+ /* although allocated on-demand, timeout_image is freed only -+ * in free_buffers(), so we don't need to worry about it being -+ * deallocated suddenly */ -+ memcpy(dev->image + dev->buffers[ret].buffer.m.offset, -+ dev->timeout_image, dev->buffer_size); -+ } -+ return ret; ++ return -EAGAIN; +} + -+/* put buffer to dequeue -+ * called on VIDIOC_DQBUF -+ */ -+static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) ++static DEVICE_ATTR(state, S_IRUGO, attr_show_state, NULL); ++ ++static void v4l2loopback_remove_sysfs(struct video_device *vdev) +{ -+ struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ int index; -+ struct v4l2l_buffer *b; ++#define V4L2_SYSFS_DESTROY(x) device_remove_file(&vdev->dev, &dev_attr_##x) + -+ dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(fh); -+ if (opener->timeout_image_io) { -+ *buf = dev->timeout_image_buffer.buffer; -+ return 0; ++ if (vdev) { ++ V4L2_SYSFS_DESTROY(format); ++ V4L2_SYSFS_DESTROY(buffers); ++ V4L2_SYSFS_DESTROY(max_openers); ++ 
V4L2_SYSFS_DESTROY(state);
++ /* ... */
+ }
++}
+
++static void v4l2loopback_create_sysfs(struct video_device *vdev)
++{
++ int res = 0;
+
++#define V4L2_SYSFS_CREATE(x) \
++ res = device_create_file(&vdev->dev, &dev_attr_##x); \
++ if (res < 0) \
++ break
++ if (!vdev)
++ return;
++ do {
++ V4L2_SYSFS_CREATE(format);
++ V4L2_SYSFS_CREATE(buffers);
++ V4L2_SYSFS_CREATE(max_openers);
++ V4L2_SYSFS_CREATE(state);
++ /* ... */
++ } while (0);
+
++ if (res >= 0)
++ return;
++ dev_err(&vdev->dev, "%s error: %d\n", __func__, res);
++}
+
++/* Event APIs */
+
++#define V4L2LOOPBACK_EVENT_BASE (V4L2_EVENT_PRIVATE_START)
++#define V4L2LOOPBACK_EVENT_OFFSET 0x08E00000
++#define V4L2_EVENT_PRI_CLIENT_USAGE \
++ (V4L2LOOPBACK_EVENT_BASE + V4L2LOOPBACK_EVENT_OFFSET + 1)
+
++struct v4l2_event_client_usage {
++ __u32 count;
++};
+
++/* global module data */
++/* find a device based on its device-number (e.g.
'3' for /dev/video3) */ ++struct v4l2loopback_lookup_cb_data { ++ int device_nr; ++ struct v4l2_loopback_device *device; ++}; ++static int v4l2loopback_lookup_cb(int id, void *ptr, void *data) ++{ ++ struct v4l2_loopback_device *device = ptr; ++ struct v4l2loopback_lookup_cb_data *cbdata = data; ++ if (cbdata && device && device->vdev) { ++ if (device->vdev->num == cbdata->device_nr) { ++ cbdata->device = device; ++ cbdata->device_nr = id; ++ return 1; + } -+ opener->type = WRITER; -+ dev->ready_for_output = 0; -+ dev->ready_for_capture++; -+ return 0; -+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: -+ if (!dev->ready_for_capture) -+ return -EIO; -+ if (dev->active_readers > 0) -+ return -EBUSY; -+ opener->type = READER; -+ dev->active_readers++; -+ client_usage_queue_event(dev->vdev); -+ return 0; -+ default: -+ return -EINVAL; + } -+ return -EINVAL; ++ return 0; +} -+ -+/* stop streaming -+ * called on VIDIOC_STREAMOFF -+ */ -+static int vidioc_streamoff(struct file *file, void *fh, -+ enum v4l2_buf_type type) ++static int v4l2loopback_lookup(int device_nr, ++ struct v4l2_loopback_device **device) +{ -+ struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ -+ MARK(); -+ dprintk("%d\n", type); -+ -+ dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(fh); -+ switch (type) { -+ case V4L2_BUF_TYPE_VIDEO_OUTPUT: -+ if (dev->ready_for_capture > 0) -+ dev->ready_for_capture--; -+ return 0; -+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: -+ if (opener->type == READER) { -+ opener->type = 0; -+ dev->active_readers--; -+ client_usage_queue_event(dev->vdev); -+ } -+ return 0; -+ default: -+ return -EINVAL; ++ struct v4l2loopback_lookup_cb_data data = { ++ .device_nr = device_nr, ++ .device = NULL, ++ }; ++ int err = idr_for_each(&v4l2loopback_index_idr, &v4l2loopback_lookup_cb, ++ &data); ++ if (1 == err) { ++ if (device) ++ *device = data.device; ++ return data.device_nr; + } -+ return -EINVAL; ++ return -ENODEV; +} -+ -+#ifdef CONFIG_VIDEO_V4L1_COMPAT -+static int vidiocgmbuf(struct file *file, void *fh, struct video_mbuf *p) ++static struct v4l2_loopback_device *v4l2loopback_cd2dev(struct device *cd) +{ -+ struct v4l2_loopback_device *dev; -+ MARK(); ++ struct video_device *loopdev = to_video_device(cd); ++ struct v4l2loopback_private *ptr = ++ (struct v4l2loopback_private *)video_get_drvdata(loopdev); ++ int nr = ptr->device_nr; + -+ dev = v4l2loopback_getdevice(file); -+ p->frames = dev->buffers_number; -+ p->offsets[0] = 0; -+ p->offsets[1] = 0; -+ p->size = dev->buffer_size; -+ return 0; ++ return idr_find(&v4l2loopback_index_idr, nr); +} -+#endif + -+static void client_usage_queue_event(struct video_device *vdev) ++static struct v4l2_loopback_device *v4l2loopback_getdevice(struct file *f) +{ -+ struct v4l2_event ev; -+ struct v4l2_loopback_device *dev; -+ -+ dev = container_of(vdev->v4l2_dev, struct v4l2_loopback_device, -+ v4l2_dev); -+ -+ memset(&ev, 0, sizeof(ev)); -+ ev.type = V4L2_EVENT_PRI_CLIENT_USAGE; -+ ((struct v4l2_event_client_usage *)&ev.u)->count = dev->active_readers; ++ struct v4l2loopback_private *ptr = video_drvdata(f); ++ int nr = ptr->device_nr; + -+ v4l2_event_queue(vdev, &ev); ++ return idr_find(&v4l2loopback_index_idr, nr); +} + -+static int client_usage_ops_add(struct v4l2_subscribed_event *sev, -+ unsigned elems) -+{ -+ if (!(sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) -+ return 0; -+ -+ client_usage_queue_event(sev->fh->vdev); -+ return 0; -+} ++/* forward declarations */ ++static void client_usage_queue_event(struct video_device *vdev); ++static void 
init_buffers(struct v4l2_loopback_device *dev); ++static int allocate_buffers(struct v4l2_loopback_device *dev); ++static void free_buffers(struct v4l2_loopback_device *dev); ++static void try_free_buffers(struct v4l2_loopback_device *dev); ++static int allocate_timeout_image(struct v4l2_loopback_device *dev); ++static void check_timers(struct v4l2_loopback_device *dev); ++static const struct v4l2_file_operations v4l2_loopback_fops; ++static const struct v4l2_ioctl_ops v4l2_loopback_ioctl_ops; + -+static void client_usage_ops_replace(struct v4l2_event *old, -+ const struct v4l2_event *new) ++/* Queue helpers */ ++/* next functions sets buffer flags and adjusts counters accordingly */ ++static inline void set_done(struct v4l2l_buffer *buffer) +{ -+ *((struct v4l2_event_client_usage *)&old->u) = -+ *((struct v4l2_event_client_usage *)&new->u); ++ buffer->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED; ++ buffer->buffer.flags |= V4L2_BUF_FLAG_DONE; +} + -+static void client_usage_ops_merge(const struct v4l2_event *old, -+ struct v4l2_event *new) ++static inline void set_queued(struct v4l2l_buffer *buffer) +{ -+ *((struct v4l2_event_client_usage *)&new->u) = -+ *((struct v4l2_event_client_usage *)&old->u); ++ buffer->buffer.flags &= ~V4L2_BUF_FLAG_DONE; ++ buffer->buffer.flags |= V4L2_BUF_FLAG_QUEUED; +} + -+const struct v4l2_subscribed_event_ops client_usage_ops = { -+ .add = client_usage_ops_add, -+ .replace = client_usage_ops_replace, -+ .merge = client_usage_ops_merge, -+}; -+ -+static int vidioc_subscribe_event(struct v4l2_fh *fh, -+ const struct v4l2_event_subscription *sub) ++static inline void unset_flags(struct v4l2l_buffer *buffer) +{ -+ switch (sub->type) { -+ case V4L2_EVENT_CTRL: -+ return v4l2_ctrl_subscribe_event(fh, sub); -+ case V4L2_EVENT_PRI_CLIENT_USAGE: -+ return v4l2_event_subscribe(fh, sub, 0, &client_usage_ops); -+ } -+ -+ return -EINVAL; ++ buffer->buffer.flags &= ~V4L2_BUF_FLAG_QUEUED; ++ buffer->buffer.flags &= ~V4L2_BUF_FLAG_DONE; +} + -+/* file operations */ -+static void vm_open(struct vm_area_struct *vma) ++/* V4L2 ioctl caps and params calls */ ++/* returns device capabilities ++ * called on VIDIOC_QUERYCAP ++ */ ++static int vidioc_querycap(struct file *file, void *priv, ++ struct v4l2_capability *cap) +{ -+ struct v4l2l_buffer *buf; -+ MARK(); ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ int device_nr = ++ ((struct v4l2loopback_private *)video_get_drvdata(dev->vdev)) ++ ->device_nr; ++ __u32 capabilities = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; + -+ buf = vma->vm_private_data; -+ buf->use_count++; ++ strscpy(cap->driver, "v4l2 loopback", sizeof(cap->driver)); ++ snprintf(cap->card, sizeof(cap->card), "%s", dev->card_label); ++ snprintf(cap->bus_info, sizeof(cap->bus_info), ++ "platform:v4l2loopback-%03d", device_nr); + -+ buf->buffer.flags |= V4L2_BUF_FLAG_MAPPED; -+} ++ if (dev->announce_all_caps) { ++ capabilities |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT; ++ } else { ++ if (dev->ready_for_capture) { ++ capabilities |= V4L2_CAP_VIDEO_CAPTURE; ++ } ++ if (dev->ready_for_output) { ++ capabilities |= V4L2_CAP_VIDEO_OUTPUT; ++ } ++ } + -+static void vm_close(struct vm_area_struct *vma) -+{ -+ struct v4l2l_buffer *buf; -+ MARK(); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) ++ dev->vdev->device_caps = ++#endif /* >=linux-4.7.0 */ ++ cap->device_caps = cap->capabilities = capabilities; + -+ buf = vma->vm_private_data; -+ buf->use_count--; ++ cap->capabilities |= V4L2_CAP_DEVICE_CAPS; + -+ if (buf->use_count <= 0) -+ 
buf->buffer.flags &= ~V4L2_BUF_FLAG_MAPPED; ++ memset(cap->reserved, 0, sizeof(cap->reserved)); ++ return 0; +} + -+static struct vm_operations_struct vm_ops = { -+ .open = vm_open, -+ .close = vm_close, -+}; -+ -+static int v4l2_loopback_mmap(struct file *file, struct vm_area_struct *vma) ++static int vidioc_enum_framesizes(struct file *file, void *fh, ++ struct v4l2_frmsizeenum *argp) +{ -+ u8 *addr; -+ unsigned long start; -+ unsigned long size; + struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; -+ struct v4l2l_buffer *buffer = NULL; -+ MARK(); + -+ start = (unsigned long)vma->vm_start; -+ size = (unsigned long)(vma->vm_end - vma->vm_start); ++ /* there can be only one... */ ++ if (argp->index) ++ return -EINVAL; + + dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(file->private_data); -+ -+ if (size > dev->buffer_size) { -+ dprintk("userspace tries to mmap too much, fail\n"); -+ return -EINVAL; -+ } -+ if (opener->timeout_image_io) { -+ /* we are going to map the timeout_image_buffer */ -+ if ((vma->vm_pgoff << PAGE_SHIFT) != -+ dev->buffer_size * MAX_BUFFERS) { -+ dprintk("invalid mmap offset for timeout_image_io mode\n"); ++ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { ++ /* format has already been negotiated ++ * cannot change during runtime ++ */ ++ if (argp->pixel_format != dev->pix_format.pixelformat) + return -EINVAL; -+ } -+ } else if ((vma->vm_pgoff << PAGE_SHIFT) > -+ dev->buffer_size * (dev->buffers_number - 1)) { -+ dprintk("userspace tries to mmap too far, fail\n"); -+ return -EINVAL; -+ } + -+ /* FIXXXXXME: allocation should not happen here! */ -+ if (NULL == dev->image) -+ if (allocate_buffers(dev) < 0) -+ return -EINVAL; ++ argp->type = V4L2_FRMSIZE_TYPE_DISCRETE; + -+ if (opener->timeout_image_io) { -+ buffer = &dev->timeout_image_buffer; -+ addr = dev->timeout_image; ++ argp->discrete.width = dev->pix_format.width; ++ argp->discrete.height = dev->pix_format.height; + } else { -+ int i; -+ for (i = 0; i < dev->buffers_number; ++i) { -+ buffer = &dev->buffers[i]; -+ if ((buffer->buffer.m.offset >> PAGE_SHIFT) == -+ vma->vm_pgoff) -+ break; -+ } -+ -+ if (i >= dev->buffers_number) ++ /* if the format has not been negotiated yet, we accept anything ++ */ ++ if (NULL == format_by_fourcc(argp->pixel_format)) + return -EINVAL; + -+ addr = dev->image + (vma->vm_pgoff << PAGE_SHIFT); -+ } -+ -+ while (size > 0) { -+ struct page *page; ++ if (dev->min_width == dev->max_width && ++ dev->min_height == dev->max_height) { ++ argp->type = V4L2_FRMSIZE_TYPE_DISCRETE; + -+ page = vmalloc_to_page(addr); -+ -+ if (vm_insert_page(vma, start, page) < 0) -+ return -EAGAIN; -+ -+ start += PAGE_SIZE; -+ addr += PAGE_SIZE; -+ size -= PAGE_SIZE; -+ } ++ argp->discrete.width = dev->min_width; ++ argp->discrete.height = dev->min_height; ++ } else { ++ argp->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; + -+ vma->vm_ops = &vm_ops; -+ vma->vm_private_data = buffer; ++ argp->stepwise.min_width = dev->min_width; ++ argp->stepwise.min_height = dev->min_height; + -+ vm_open(vma); ++ argp->stepwise.max_width = dev->max_width; ++ argp->stepwise.max_height = dev->max_height; + -+ MARK(); ++ argp->stepwise.step_width = 1; ++ argp->stepwise.step_height = 1; ++ } ++ } + return 0; +} + -+static unsigned int v4l2_loopback_poll(struct file *file, -+ struct poll_table_struct *pts) ++/* returns frameinterval (fps) for the set resolution ++ * called on VIDIOC_ENUM_FRAMEINTERVALS ++ */ ++static int vidioc_enum_frameintervals(struct file *file, void *fh, ++ struct v4l2_frmivalenum *argp) +{ -+ 
struct v4l2_loopback_opener *opener; -+ struct v4l2_loopback_device *dev; -+ __poll_t req_events = poll_requested_events(pts); -+ int ret_mask = 0; -+ MARK(); ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); + -+ opener = fh_to_opener(file->private_data); -+ dev = v4l2loopback_getdevice(file); ++ /* there can be only one... */ ++ if (argp->index) ++ return -EINVAL; + -+ if (req_events & POLLPRI) { -+ if (!v4l2_event_pending(&opener->fh)) -+ poll_wait(file, &opener->fh.wait, pts); -+ if (v4l2_event_pending(&opener->fh)) { -+ ret_mask |= POLLPRI; -+ if (!(req_events & DEFAULT_POLLMASK)) -+ return ret_mask; -+ } -+ } ++ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { ++ if (argp->width != dev->pix_format.width || ++ argp->height != dev->pix_format.height || ++ argp->pixel_format != dev->pix_format.pixelformat) ++ return -EINVAL; + -+ switch (opener->type) { -+ case WRITER: -+ ret_mask |= POLLOUT | POLLWRNORM; -+ break; -+ case READER: -+ if (!can_read(dev, opener)) { -+ if (ret_mask) -+ return ret_mask; -+ poll_wait(file, &dev->read_event, pts); -+ } -+ if (can_read(dev, opener)) -+ ret_mask |= POLLIN | POLLRDNORM; -+ if (v4l2_event_pending(&opener->fh)) -+ ret_mask |= POLLPRI; -+ break; -+ default: -+ break; ++ argp->type = V4L2_FRMIVAL_TYPE_DISCRETE; ++ argp->discrete = dev->capture_param.timeperframe; ++ } else { ++ if (argp->width < dev->min_width || ++ argp->width > dev->max_width || ++ argp->height < dev->min_height || ++ argp->height > dev->max_height || ++ NULL == format_by_fourcc(argp->pixel_format)) ++ return -EINVAL; ++ ++ argp->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; ++ argp->stepwise.min.numerator = 1; ++ argp->stepwise.min.denominator = V4L2LOOPBACK_FPS_MAX; ++ argp->stepwise.max.numerator = 1; ++ argp->stepwise.max.denominator = V4L2LOOPBACK_FPS_MIN; ++ argp->stepwise.step.numerator = 1; ++ argp->stepwise.step.denominator = 1; + } + -+ MARK(); -+ return ret_mask; ++ return 0; +} + -+/* do not want to limit device opens, it can be as many readers as user want, -+ * writers are limited by means of setting writer field */ -+static int v4l2_loopback_open(struct file *file) ++/* ------------------ CAPTURE ----------------------- */ ++ ++/* returns device formats ++ * called on VIDIOC_ENUM_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_CAPTURE ++ */ ++static int vidioc_enum_fmt_cap(struct file *file, void *fh, ++ struct v4l2_fmtdesc *f) +{ + struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_opener *opener; ++ const struct v4l2l_format *fmt; + MARK(); -+ dev = v4l2loopback_getdevice(file); -+ if (dev->open_count.counter >= dev->max_openers) -+ return -EBUSY; -+ /* kfree on close */ -+ opener = kzalloc(sizeof(*opener), GFP_KERNEL); -+ if (opener == NULL) -+ return -ENOMEM; -+ -+ atomic_inc(&dev->open_count); + -+ opener->timeout_image_io = dev->timeout_image_io; -+ if (opener->timeout_image_io) { -+ int r = allocate_timeout_image(dev); ++ dev = v4l2loopback_getdevice(file); + -+ if (r < 0) { -+ dprintk("timeout image allocation failed\n"); ++ if (f->index) ++ return -EINVAL; + -+ atomic_dec(&dev->open_count); ++ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { ++ /* format has been fixed, so only one single format is supported */ ++ const __u32 format = dev->pix_format.pixelformat; + -+ kfree(opener); -+ return r; ++ if ((fmt = format_by_fourcc(format))) { ++ snprintf(f->description, sizeof(f->description), "%s", ++ fmt->name); ++ } else { ++ snprintf(f->description, sizeof(f->description), ++ "[%c%c%c%c]", (format >> 0) & 0xFF, ++ (format >> 8) & 0xFF, (format >> 16) & 0xFF, ++ 
(format >> 24) & 0xFF); + } -+ } -+ -+ v4l2_fh_init(&opener->fh, video_devdata(file)); -+ file->private_data = &opener->fh; + -+ v4l2_fh_add(&opener->fh); -+ dprintk("opened dev:%p with image:%p\n", dev, dev ? dev->image : NULL); ++ f->pixelformat = dev->pix_format.pixelformat; ++ } else { ++ return -EINVAL; ++ } ++ f->flags = 0; + MARK(); + return 0; +} + -+static int v4l2_loopback_close(struct file *file) ++/* returns current video format ++ * called on VIDIOC_G_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_CAPTURE ++ */ ++static int vidioc_g_fmt_cap(struct file *file, void *priv, ++ struct v4l2_format *fmt) +{ -+ struct v4l2_loopback_opener *opener; + struct v4l2_loopback_device *dev; -+ int is_writer = 0, is_reader = 0; + MARK(); + -+ opener = fh_to_opener(file->private_data); + dev = v4l2loopback_getdevice(file); ++ if (!dev->ready_for_capture && !dev->ready_for_output) ++ return -EINVAL; + -+ if (WRITER == opener->type) -+ is_writer = 1; -+ if (READER == opener->type) -+ is_reader = 1; -+ -+ atomic_dec(&dev->open_count); -+ if (dev->open_count.counter == 0) { -+ del_timer_sync(&dev->sustain_timer); -+ del_timer_sync(&dev->timeout_timer); -+ } -+ try_free_buffers(dev); -+ -+ v4l2_fh_del(&opener->fh); -+ v4l2_fh_exit(&opener->fh); -+ -+ kfree(opener); -+ if (is_writer) -+ dev->ready_for_output = 1; -+ if (is_reader) { -+ dev->active_readers--; -+ client_usage_queue_event(dev->vdev); -+ } ++ fmt->fmt.pix = dev->pix_format; + MARK(); + return 0; +} + -+static ssize_t v4l2_loopback_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) ++/* checks if it is OK to change to format fmt; ++ * actual check is done by inner_try_setfmt ++ * just checking that pixelformat is OK and set other parameters, app should ++ * obey this decision ++ * called on VIDIOC_TRY_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_CAPTURE ++ */ ++static int vidioc_try_fmt_cap(struct file *file, void *priv, ++ struct v4l2_format *fmt) +{ -+ int read_index; -+ struct v4l2_loopback_device *dev; -+ struct v4l2_buffer *b; -+ MARK(); -+ -+ dev = v4l2loopback_getdevice(file); ++ int ret = 0; ++ if (!V4L2_TYPE_IS_CAPTURE(fmt->type)) ++ return -EINVAL; ++ ret = inner_try_setfmt(file, fmt); ++ if (-EBUSY == ret) ++ return 0; ++ return ret; ++} + -+ read_index = get_capture_buffer(file); -+ if (read_index < 0) -+ return read_index; -+ if (count > dev->buffer_size) -+ count = dev->buffer_size; -+ b = &dev->buffers[read_index].buffer; -+ if (count > b->bytesused) -+ count = b->bytesused; -+ if (copy_to_user((void *)buf, (void *)(dev->image + b->m.offset), -+ count)) { -+ printk(KERN_ERR -+ "v4l2-loopback: failed copy_to_user() in read buf\n"); -+ return -EFAULT; ++/* sets new output format, if possible ++ * actually format is set by input and we even do not check it, just return ++ * current one, but it is possible to set subregions of input TODO(vasaka) ++ * called on VIDIOC_S_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_CAPTURE ++ */ ++static int vidioc_s_fmt_cap(struct file *file, void *priv, ++ struct v4l2_format *fmt) ++{ ++ int ret; ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ if (!V4L2_TYPE_IS_CAPTURE(fmt->type)) ++ return -EINVAL; ++ ret = inner_try_setfmt(file, fmt); ++ if (!ret) { ++ dev->pix_format = fmt->fmt.pix; + } -+ dprintkrw("leave v4l2_loopback_read()\n"); -+ return count; ++ return ret; +} + -+static ssize_t v4l2_loopback_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *ppos) ++/* ------------------ OUTPUT ----------------------- 
*/ ++ ++/* returns device formats; ++ * LATER: allow all formats ++ * called on VIDIOC_ENUM_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT ++ */ ++static int vidioc_enum_fmt_out(struct file *file, void *fh, ++ struct v4l2_fmtdesc *f) +{ -+ struct v4l2_loopback_opener *opener; + struct v4l2_loopback_device *dev; -+ int write_index; -+ struct v4l2_buffer *b; -+ int err = 0; -+ -+ MARK(); ++ const struct v4l2l_format *fmt; + + dev = v4l2loopback_getdevice(file); -+ opener = fh_to_opener(file->private_data); + -+ if (UNNEGOTIATED == opener->type) { -+ spin_lock(&dev->lock); ++ if (V4L2LOOPBACK_IS_FIXED_FMT(dev)) { ++ /* format has been fixed, so only one single format is supported */ ++ const __u32 format = dev->pix_format.pixelformat; + -+ if (dev->ready_for_output) { -+ err = vidioc_streamon(file, file->private_data, -+ V4L2_BUF_TYPE_VIDEO_OUTPUT); -+ } ++ if (f->index) ++ return -EINVAL; + -+ spin_unlock(&dev->lock); ++ if ((fmt = format_by_fourcc(format))) { ++ snprintf(f->description, sizeof(f->description), "%s", ++ fmt->name); ++ } else { ++ snprintf(f->description, sizeof(f->description), ++ "[%c%c%c%c]", (format >> 0) & 0xFF, ++ (format >> 8) & 0xFF, (format >> 16) & 0xFF, ++ (format >> 24) & 0xFF); ++ } + -+ if (err < 0) -+ return err; -+ } ++ f->pixelformat = dev->pix_format.pixelformat; ++ } else { ++ /* fill in a dummy format */ ++ /* coverity[unsigned_compare] */ ++ if (f->index < 0 || f->index >= FORMATS) ++ return -EINVAL; + -+ if (WRITER != opener->type) -+ return -EINVAL; ++ fmt = &formats[f->index]; + -+ if (!dev->ready_for_capture) { -+ int ret = allocate_buffers(dev); -+ if (ret < 0) -+ return ret; -+ dev->ready_for_capture = 1; ++ f->pixelformat = fmt->fourcc; ++ snprintf(f->description, sizeof(f->description), "%s", ++ fmt->name); + } -+ dprintkrw("v4l2_loopback_write() trying to write %zu bytes\n", count); -+ if (count > dev->buffer_size) -+ count = dev->buffer_size; -+ -+ write_index = v4l2l_mod64(dev->write_position, dev->used_buffers); -+ b = &dev->buffers[write_index].buffer; ++ f->flags = 0; + -+ if (copy_from_user((void *)(dev->image + b->m.offset), (void *)buf, -+ count)) { -+ printk(KERN_ERR -+ "v4l2-loopback: failed copy_from_user() in write buf, could not write %zu\n", -+ count); -+ return -EFAULT; -+ } -+ v4l2l_get_timestamp(b); -+ b->bytesused = count; -+ b->sequence = dev->write_position; -+ buffer_written(dev, &dev->buffers[write_index]); -+ wake_up_all(&dev->read_event); -+ dprintkrw("leave v4l2_loopback_write()\n"); -+ return count; ++ return 0; +} + -+/* init functions */ -+/* frees buffers, if already allocated */ -+static void free_buffers(struct v4l2_loopback_device *dev) ++/* returns current video format format fmt */ ++/* NOTE: this is called from the producer ++ * so if format has not been negotiated yet, ++ * it should return ALL of available formats, ++ * called on VIDIOC_G_FMT, with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT ++ */ ++static int vidioc_g_fmt_out(struct file *file, void *priv, ++ struct v4l2_format *fmt) +{ ++ struct v4l2_loopback_device *dev; + MARK(); -+ dprintk("freeing image@%p for dev:%p\n", dev ? 
dev->image : NULL, dev); -+ if (!dev) -+ return; -+ if (dev->image) { -+ vfree(dev->image); -+ dev->image = NULL; -+ } -+ if (dev->timeout_image) { -+ vfree(dev->timeout_image); -+ dev->timeout_image = NULL; -+ } -+ dev->imagesize = 0; ++ ++ dev = v4l2loopback_getdevice(file); ++ ++ /* ++ * LATER: this should return the currently valid format ++ * gstreamer doesn't like it, if this returns -EINVAL, as it ++ * then concludes that there is _no_ valid format ++ * CHECK whether this assumption is wrong, ++ * or whether we have to always provide a valid format ++ */ ++ ++ fmt->fmt.pix = dev->pix_format; ++ return 0; +} -+/* frees buffers, if they are no longer needed */ -+static void try_free_buffers(struct v4l2_loopback_device *dev) ++ ++/* checks if it is OK to change to format fmt; ++ * if format is negotiated do not change it ++ * called on VIDIOC_TRY_FMT with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT ++ */ ++static int vidioc_try_fmt_out(struct file *file, void *priv, ++ struct v4l2_format *fmt) +{ -+ MARK(); -+ if (0 == dev->open_count.counter && !dev->keep_format) { -+ free_buffers(dev); -+ dev->ready_for_capture = 0; -+ dev->buffer_size = 0; -+ dev->write_position = 0; -+ } ++ int ret = 0; ++ if (!V4L2_TYPE_IS_OUTPUT(fmt->type)) ++ return -EINVAL; ++ ret = inner_try_setfmt(file, fmt); ++ if (-EBUSY == ret) ++ return 0; ++ return ret; +} -+/* allocates buffers, if buffer_size is set */ -+static int allocate_buffers(struct v4l2_loopback_device *dev) -+{ -+ int err; -+ -+ MARK(); -+ /* vfree on close file operation in case no open handles left */ + -+ if (dev->buffer_size < 1 || dev->buffers_number < 1) ++/* sets new output format, if possible; ++ * allocate data here because we do not know if it will be streaming or ++ * read/write IO ++ * called on VIDIOC_S_FMT with v4l2_buf_type set to V4L2_BUF_TYPE_VIDEO_OUTPUT ++ */ ++static int vidioc_s_fmt_out(struct file *file, void *priv, ++ struct v4l2_format *fmt) ++{ ++ struct v4l2_loopback_device *dev; ++ int ret; ++ char buf[5]; ++ buf[4] = 0; ++ if (!V4L2_TYPE_IS_OUTPUT(fmt->type)) + return -EINVAL; ++ dev = v4l2loopback_getdevice(file); + -+ if ((__LONG_MAX__ / dev->buffer_size) < dev->buffers_number) -+ return -ENOSPC; -+ -+ if (dev->image) { -+ dprintk("allocating buffers again: %ld %ld\n", -+ dev->buffer_size * dev->buffers_number, dev->imagesize); -+ /* FIXME: prevent double allocation more intelligently! 
*/ -+ if (dev->buffer_size * dev->buffers_number == dev->imagesize) -+ return 0; ++ ret = inner_try_setfmt(file, fmt); ++ if (!ret) { ++ dev->pix_format = fmt->fmt.pix; ++ dev->pix_format_has_valid_sizeimage = ++ v4l2l_pix_format_has_valid_sizeimage(fmt); ++ dprintk("s_fmt_out(%d) %d...%d\n", ret, dev->ready_for_capture, ++ dev->pix_format.sizeimage); ++ dprintk("outFOURCC=%s\n", ++ fourcc2str(dev->pix_format.pixelformat, buf)); + -+ /* check whether the total number of readers/writers is <=1 */ -+ if ((dev->ready_for_capture + dev->active_readers) <= 1) -+ free_buffers(dev); -+ else -+ return -EINVAL; ++ if (!dev->ready_for_capture) { ++ dev->buffer_size = ++ PAGE_ALIGN(dev->pix_format.sizeimage); ++ // JMZ: TODO get rid of the next line ++ fmt->fmt.pix.sizeimage = dev->buffer_size; ++ ret = allocate_buffers(dev); ++ } + } ++ return ret; ++} + -+ dev->imagesize = (unsigned long)dev->buffer_size * -+ (unsigned long)dev->buffers_number; -+ -+ dprintk("allocating %ld = %ldx%d\n", dev->imagesize, dev->buffer_size, -+ dev->buffers_number); -+ err = -ENOMEM; ++// #define V4L2L_OVERLAY ++#ifdef V4L2L_OVERLAY ++/* ------------------ OVERLAY ----------------------- */ ++/* currently unsupported */ ++/* GSTreamer's v4l2sink is buggy, as it requires the overlay to work ++ * while it should only require it, if overlay is requested ++ * once the gstreamer element is fixed, remove the overlay dummies ++ */ ++#warning OVERLAY dummies ++static int vidioc_g_fmt_overlay(struct file *file, void *priv, ++ struct v4l2_format *fmt) ++{ ++ return 0; ++} + -+ if (dev->timeout_jiffies > 0) { -+ err = allocate_timeout_image(dev); -+ if (err < 0) -+ goto error; -+ } ++static int vidioc_s_fmt_overlay(struct file *file, void *priv, ++ struct v4l2_format *fmt) ++{ ++ return 0; ++} ++#endif /* V4L2L_OVERLAY */ + -+ dev->image = vmalloc(dev->imagesize); -+ if (dev->image == NULL) -+ goto error; ++/* ------------------ PARAMs ----------------------- */ + -+ dprintk("vmallocated %ld bytes\n", dev->imagesize); ++/* get some data flow parameters, only capability, fps and readbuffers has ++ * effect on this driver ++ * called on VIDIOC_G_PARM ++ */ ++static int vidioc_g_parm(struct file *file, void *priv, ++ struct v4l2_streamparm *parm) ++{ ++ /* do not care about type of opener, hope these enums would always be ++ * compatible */ ++ struct v4l2_loopback_device *dev; + MARK(); + -+ init_buffers(dev); ++ dev = v4l2loopback_getdevice(file); ++ parm->parm.capture = dev->capture_param; + return 0; -+ -+error: -+ free_buffers(dev); -+ return err; +} + -+/* init inner buffers, they are capture mode and flags are set as -+ * for capture mod buffers */ -+static void init_buffers(struct v4l2_loopback_device *dev) ++/* get some data flow parameters, only capability, fps and readbuffers has ++ * effect on this driver ++ * called on VIDIOC_S_PARM ++ */ ++static int vidioc_s_parm(struct file *file, void *priv, ++ struct v4l2_streamparm *parm) +{ -+ int i; -+ int buffer_size; -+ int bytesused; ++ struct v4l2_loopback_device *dev; ++ int err = 0; + MARK(); + -+ buffer_size = dev->buffer_size; -+ bytesused = dev->pix_format.sizeimage; -+ for (i = 0; i < dev->buffers_number; ++i) { -+ struct v4l2_buffer *b = &dev->buffers[i].buffer; -+ b->index = i; -+ b->bytesused = bytesused; -+ b->length = buffer_size; -+ b->field = V4L2_FIELD_NONE; -+ b->flags = 0; -+ b->m.offset = i * buffer_size; -+ b->memory = V4L2_MEMORY_MMAP; -+ b->sequence = 0; -+ b->timestamp.tv_sec = 0; -+ b->timestamp.tv_usec = 0; -+ b->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ++ 
dev = v4l2loopback_getdevice(file); ++ dprintk("vidioc_s_parm called frate=%d/%d\n", ++ parm->parm.capture.timeperframe.numerator, ++ parm->parm.capture.timeperframe.denominator); + -+ v4l2l_get_timestamp(b); ++ switch (parm->type) { ++ case V4L2_BUF_TYPE_VIDEO_CAPTURE: ++ if ((err = set_timeperframe( ++ dev, &parm->parm.capture.timeperframe)) < 0) ++ return err; ++ break; ++ case V4L2_BUF_TYPE_VIDEO_OUTPUT: ++ if ((err = set_timeperframe( ++ dev, &parm->parm.capture.timeperframe)) < 0) ++ return err; ++ break; ++ default: ++ return -1; + } -+ dev->timeout_image_buffer = dev->buffers[0]; -+ dev->timeout_image_buffer.buffer.m.offset = MAX_BUFFERS * buffer_size; -+ MARK(); ++ ++ parm->parm.capture = dev->capture_param; ++ return 0; +} + -+static int allocate_timeout_image(struct v4l2_loopback_device *dev) ++#ifdef V4L2LOOPBACK_WITH_STD ++/* sets a tv standard, actually we do not need to handle this any special way ++ * added to support effecttv ++ * called on VIDIOC_S_STD ++ */ ++static int vidioc_s_std(struct file *file, void *fh, v4l2_std_id *_std) +{ -+ MARK(); -+ if (dev->buffer_size <= 0) { -+ dev->timeout_image_io = 0; -+ return -EINVAL; ++ v4l2_std_id req_std = 0, supported_std = 0; ++ const v4l2_std_id all_std = V4L2_STD_ALL, no_std = 0; ++ ++ if (_std) { ++ req_std = *_std; ++ *_std = all_std; + } + -+ if (dev->timeout_image == NULL) { -+ dev->timeout_image = vzalloc(dev->buffer_size); -+ if (dev->timeout_image == NULL) { -+ dev->timeout_image_io = 0; -+ return -ENOMEM; -+ } ++ /* we support everything in V4L2_STD_ALL, but not more... */ ++ supported_std = (all_std & req_std); ++ if (no_std == supported_std) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++/* gets a fake video standard ++ * called on VIDIOC_G_STD ++ */ ++static int vidioc_g_std(struct file *file, void *fh, v4l2_std_id *norm) ++{ ++ if (norm) ++ *norm = V4L2_STD_ALL; ++ return 0; ++} ++/* gets a fake video standard ++ * called on VIDIOC_QUERYSTD ++ */ ++static int vidioc_querystd(struct file *file, void *fh, v4l2_std_id *norm) ++{ ++ if (norm) ++ *norm = V4L2_STD_ALL; ++ return 0; ++} ++#endif /* V4L2LOOPBACK_WITH_STD */ ++ ++static int v4l2loopback_set_ctrl(struct v4l2_loopback_device *dev, u32 id, ++ s64 val) ++{ ++ switch (id) { ++ case CID_KEEP_FORMAT: ++ if (val < 0 || val > 1) ++ return -EINVAL; ++ dev->keep_format = val; ++ try_free_buffers( ++ dev); /* will only free buffers if !keep_format */ ++ break; ++ case CID_SUSTAIN_FRAMERATE: ++ if (val < 0 || val > 1) ++ return -EINVAL; ++ spin_lock_bh(&dev->lock); ++ dev->sustain_framerate = val; ++ check_timers(dev); ++ spin_unlock_bh(&dev->lock); ++ break; ++ case CID_TIMEOUT: ++ if (val < 0 || val > MAX_TIMEOUT) ++ return -EINVAL; ++ spin_lock_bh(&dev->lock); ++ dev->timeout_jiffies = msecs_to_jiffies(val); ++ check_timers(dev); ++ spin_unlock_bh(&dev->lock); ++ allocate_timeout_image(dev); ++ break; ++ case CID_TIMEOUT_IMAGE_IO: ++ dev->timeout_image_io = 1; ++ break; ++ default: ++ return -EINVAL; + } + return 0; +} + -+/* fills and register video device */ -+static void init_vdev(struct video_device *vdev, int nr) ++static int v4l2loopback_s_ctrl(struct v4l2_ctrl *ctrl) ++{ ++ struct v4l2_loopback_device *dev = container_of( ++ ctrl->handler, struct v4l2_loopback_device, ctrl_handler); ++ return v4l2loopback_set_ctrl(dev, ctrl->id, ctrl->val); ++} ++ ++/* returns set of device outputs, in our case there is only one ++ * called on VIDIOC_ENUMOUTPUT ++ */ ++static int vidioc_enum_output(struct file *file, void *fh, ++ struct v4l2_output *outp) +{ ++ __u32 index = 
outp->index; ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); + MARK(); + -+#ifdef V4L2LOOPBACK_WITH_STD -+ vdev->tvnorms = V4L2_STD_ALL; -+#endif /* V4L2LOOPBACK_WITH_STD */ ++ if (!dev->announce_all_caps && !dev->ready_for_output) ++ return -ENOTTY; + -+ vdev->vfl_type = VFL_TYPE_VIDEO; -+ vdev->fops = &v4l2_loopback_fops; -+ vdev->ioctl_ops = &v4l2_loopback_ioctl_ops; -+ vdev->release = &video_device_release; -+ vdev->minor = -1; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) -+ vdev->device_caps = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_VIDEO_CAPTURE | -+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE | -+ V4L2_CAP_STREAMING; -+#endif ++ if (0 != index) ++ return -EINVAL; + -+ if (debug > 1) -+ vdev->dev_debug = V4L2_DEV_DEBUG_IOCTL | -+ V4L2_DEV_DEBUG_IOCTL_ARG; ++ /* clear all data (including the reserved fields) */ ++ memset(outp, 0, sizeof(*outp)); + -+ vdev->vfl_dir = VFL_DIR_M2M; ++ outp->index = index; ++ strscpy(outp->name, "loopback in", sizeof(outp->name)); ++ outp->type = V4L2_OUTPUT_TYPE_ANALOG; ++ outp->audioset = 0; ++ outp->modulator = 0; ++#ifdef V4L2LOOPBACK_WITH_STD ++ outp->std = V4L2_STD_ALL; ++#ifdef V4L2_OUT_CAP_STD ++ outp->capabilities |= V4L2_OUT_CAP_STD; ++#endif /* V4L2_OUT_CAP_STD */ ++#endif /* V4L2LOOPBACK_WITH_STD */ + -+ MARK(); ++ return 0; +} + -+/* init default capture parameters, only fps may be changed in future */ -+static void init_capture_param(struct v4l2_captureparm *capture_param) ++/* which output is currently active, ++ * called on VIDIOC_G_OUTPUT ++ */ ++static int vidioc_g_output(struct file *file, void *fh, unsigned int *i) +{ -+ MARK(); -+ capture_param->capability = 0; -+ capture_param->capturemode = 0; -+ capture_param->extendedmode = 0; -+ capture_param->readbuffers = max_buffers; -+ capture_param->timeperframe.numerator = 1; -+ capture_param->timeperframe.denominator = 30; ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ if (!dev->announce_all_caps && !dev->ready_for_output) ++ return -ENOTTY; ++ if (i) ++ *i = 0; ++ return 0; +} + -+static void check_timers(struct v4l2_loopback_device *dev) ++/* set output, can make sense if we have more than one video src, ++ * called on VIDIOC_S_OUTPUT ++ */ ++static int vidioc_s_output(struct file *file, void *fh, unsigned int i) +{ -+ if (!dev->ready_for_capture) -+ return; ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ if (!dev->announce_all_caps && !dev->ready_for_output) ++ return -ENOTTY; + -+ if (dev->timeout_jiffies > 0 && !timer_pending(&dev->timeout_timer)) -+ mod_timer(&dev->timeout_timer, jiffies + dev->timeout_jiffies); -+ if (dev->sustain_framerate && !timer_pending(&dev->sustain_timer)) -+ mod_timer(&dev->sustain_timer, -+ jiffies + dev->frame_jiffies * 3 / 2); ++ if (i) ++ return -EINVAL; ++ ++ return 0; +} -+#ifdef HAVE_TIMER_SETUP -+static void sustain_timer_clb(struct timer_list *t) -+{ -+ struct v4l2_loopback_device *dev = from_timer(dev, t, sustain_timer); -+#else -+static void sustain_timer_clb(unsigned long nr) ++ ++/* returns set of device inputs, in our case there is only one, ++ * but later I may add more ++ * called on VIDIOC_ENUMINPUT ++ */ ++static int vidioc_enum_input(struct file *file, void *fh, ++ struct v4l2_input *inp) +{ -+ struct v4l2_loopback_device *dev = -+ idr_find(&v4l2loopback_index_idr, nr); ++ struct v4l2_loopback_device *dev; ++ __u32 index = inp->index; ++ MARK(); ++ ++ if (0 != index) ++ return -EINVAL; ++ ++ /* clear all data (including the reserved fields) */ ++ memset(inp, 0, sizeof(*inp)); 
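++ /* the lines below (re)populate the single supported input: a fixed
++ * camera-type input named "loopback", whose signal status is derived
++ * from ready_for_capture further down */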
++ ++ inp->index = index; ++ strscpy(inp->name, "loopback", sizeof(inp->name)); ++ inp->type = V4L2_INPUT_TYPE_CAMERA; ++ inp->audioset = 0; ++ inp->tuner = 0; ++ inp->status = 0; ++ ++#ifdef V4L2LOOPBACK_WITH_STD ++ inp->std = V4L2_STD_ALL; ++#ifdef V4L2_IN_CAP_STD ++ inp->capabilities |= V4L2_IN_CAP_STD; +#endif -+ spin_lock(&dev->lock); -+ if (dev->sustain_framerate) { -+ dev->reread_count++; -+ dprintkrw("reread: %lld %d\n", (long long)dev->write_position, -+ dev->reread_count); -+ if (dev->reread_count == 1) -+ mod_timer(&dev->sustain_timer, -+ jiffies + max(1UL, dev->frame_jiffies / 2)); -+ else -+ mod_timer(&dev->sustain_timer, -+ jiffies + dev->frame_jiffies); -+ wake_up_all(&dev->read_event); ++#endif /* V4L2LOOPBACK_WITH_STD */ ++ ++ dev = v4l2loopback_getdevice(file); ++ if (!dev->ready_for_capture) { ++ inp->status |= V4L2_IN_ST_NO_SIGNAL; + } -+ spin_unlock(&dev->lock); ++ ++ return 0; +} -+#ifdef HAVE_TIMER_SETUP -+static void timeout_timer_clb(struct timer_list *t) ++ ++/* which input is currently active, ++ * called on VIDIOC_G_INPUT ++ */ ++static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) +{ -+ struct v4l2_loopback_device *dev = from_timer(dev, t, timeout_timer); -+#else -+static void timeout_timer_clb(unsigned long nr) ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ if (!dev->announce_all_caps && !dev->ready_for_capture) ++ return -ENOTTY; ++ if (i) ++ *i = 0; ++ return 0; ++} ++ ++/* set input, can make sense if we have more than one video src, ++ * called on VIDIOC_S_INPUT ++ */ ++static int vidioc_s_input(struct file *file, void *fh, unsigned int i) +{ -+ struct v4l2_loopback_device *dev = -+ idr_find(&v4l2loopback_index_idr, nr); -+#endif -+ spin_lock(&dev->lock); -+ if (dev->timeout_jiffies > 0) { -+ dev->timeout_happened = 1; -+ mod_timer(&dev->timeout_timer, jiffies + dev->timeout_jiffies); -+ wake_up_all(&dev->read_event); -+ } -+ spin_unlock(&dev->lock); ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ if (!dev->announce_all_caps && !dev->ready_for_capture) ++ return -ENOTTY; ++ if (i == 0) ++ return 0; ++ return -EINVAL; +} + -+/* init loopback main structure */ -+#define DEFAULT_FROM_CONF(confmember, default_condition, default_value) \ -+ ((conf) ? \ -+ ((conf->confmember default_condition) ? 
(default_value) : \ -+ (conf->confmember)) : \ -+ default_value) ++/* --------------- V4L2 ioctl buffer related calls ----------------- */ + -+static int v4l2_loopback_add(struct v4l2_loopback_config *conf, int *ret_nr) ++/* negotiate buffer type ++ * only mmap streaming supported ++ * called on VIDIOC_REQBUFS ++ */ ++static int vidioc_reqbufs(struct file *file, void *fh, ++ struct v4l2_requestbuffers *b) +{ + struct v4l2_loopback_device *dev; -+ struct v4l2_ctrl_handler *hdl; -+ struct v4l2loopback_private *vdev_priv = NULL; ++ struct v4l2_loopback_opener *opener; ++ int i; ++ MARK(); + -+ int err = -ENOMEM; ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(fh); + -+ u32 _width = V4L2LOOPBACK_SIZE_DEFAULT_WIDTH; -+ u32 _height = V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT; ++ dprintk("reqbufs: %d\t%d=%d\n", b->memory, b->count, ++ dev->buffers_number); + -+ u32 _min_width = DEFAULT_FROM_CONF(min_width, -+ < V4L2LOOPBACK_SIZE_MIN_WIDTH, -+ V4L2LOOPBACK_SIZE_MIN_WIDTH); -+ u32 _min_height = DEFAULT_FROM_CONF(min_height, -+ < V4L2LOOPBACK_SIZE_MIN_HEIGHT, -+ V4L2LOOPBACK_SIZE_MIN_HEIGHT); -+ u32 _max_width = DEFAULT_FROM_CONF(max_width, < _min_width, max_width); -+ u32 _max_height = -+ DEFAULT_FROM_CONF(max_height, < _min_height, max_height); -+ bool _announce_all_caps = (conf && conf->announce_all_caps >= 0) ? -+ (conf->announce_all_caps) : -+ V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS; -+ int _max_buffers = DEFAULT_FROM_CONF(max_buffers, <= 0, max_buffers); -+ int _max_openers = DEFAULT_FROM_CONF(max_openers, <= 0, max_openers); ++ if (opener->timeout_image_io) { ++ dev->timeout_image_io = 0; ++ if (b->memory != V4L2_MEMORY_MMAP) ++ return -EINVAL; ++ b->count = 2; ++ return 0; ++ } + -+ int nr = -1; ++ if (V4L2_TYPE_IS_OUTPUT(b->type) && (!dev->ready_for_output)) { ++ return -EBUSY; ++ } + -+ _announce_all_caps = (!!_announce_all_caps); ++ init_buffers(dev); ++ switch (b->memory) { ++ case V4L2_MEMORY_MMAP: ++ /* do nothing here, buffers are always allocated */ ++ if (b->count < 1 || dev->buffers_number < 1) ++ return 0; + -+ if (conf) { -+ const int output_nr = conf->output_nr; -+#ifdef SPLIT_DEVICES -+ const int capture_nr = conf->capture_nr; -+#else -+ const int capture_nr = output_nr; -+#endif -+ if (capture_nr >= 0 && output_nr == capture_nr) { -+ nr = output_nr; -+ } else if (capture_nr < 0 && output_nr < 0) { -+ nr = -1; -+ } else if (capture_nr < 0) { -+ nr = output_nr; -+ } else if (output_nr < 0) { -+ nr = capture_nr; -+ } else { -+ printk(KERN_ERR -+ "split OUTPUT and CAPTURE devices not yet supported."); -+ printk(KERN_INFO -+ "both devices must have the same number (%d != %d).", -+ output_nr, capture_nr); -+ return -EINVAL; ++ if (b->count > dev->buffers_number) ++ b->count = dev->buffers_number; ++ ++ /* make sure that outbufs_list contains buffers from 0 to used_buffers-1 ++ * actually, it will have been already populated via v4l2_loopback_init() ++ * at this point */ ++ if (list_empty(&dev->outbufs_list)) { ++ for (i = 0; i < dev->used_buffers; ++i) ++ list_add_tail(&dev->buffers[i].list_head, ++ &dev->outbufs_list); + } -+ } + -+ if (idr_find(&v4l2loopback_index_idr, nr)) -+ return -EEXIST; ++ /* also, if dev->used_buffers is going to be decreased, we should remove ++ * out-of-range buffers from outbufs_list, and fix bufpos2index mapping */ ++ if (b->count < dev->used_buffers) { ++ struct v4l2l_buffer *pos, *n; + -+ dprintk("creating v4l2loopback-device #%d\n", nr); -+ dev = kzalloc(sizeof(*dev), GFP_KERNEL); -+ if (!dev) -+ return -ENOMEM; ++ list_for_each_entry_safe(pos, n, 
&dev->outbufs_list,
++ list_head) {
++ if (pos->buffer.index >= b->count)
++ list_del(&pos->list_head);
++ }
+
++ /* after we update dev->used_buffers, buffers in outbufs_list will
++ * correspond to dev->write_position + [0;b->count-1] range */
++ i = v4l2l_mod64(dev->write_position, b->count);
++ list_for_each_entry(pos, &dev->outbufs_list,
++ list_head) {
++ dev->bufpos2index[i % b->count] =
++ pos->buffer.index;
++ ++i;
++ }
++ }
+
++ opener->buffers_number = b->count;
++ if (opener->buffers_number < dev->used_buffers)
++ dev->used_buffers = opener->buffers_number;
++ return 0;
++ default:
++ return -EINVAL;
++ }
++}
+
++/* returns buffer asked for;
++ * give app as many buffers as it wants, if it asks for less than MAX,
++ * but map them in our inner buffers
++ * called on VIDIOC_QUERYBUF
++ */
++static int vidioc_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
++{
++ enum v4l2_buf_type type;
++ int index;
++ struct v4l2_loopback_device *dev;
++ struct v4l2_loopback_opener *opener;
+
++ MARK();
+
++ type = b->type;
++ index = b->index;
++ dev = v4l2loopback_getdevice(file);
++ opener = fh_to_opener(fh);
+
++ if ((b->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
++ (b->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) {
++ return -EINVAL;
++ }
++ if (b->index > max_buffers)
++ return -EINVAL;
+
++ if (opener->timeout_image_io)
++ *b = dev->timeout_image_buffer.buffer;
++ else
++ *b = dev->buffers[b->index % dev->used_buffers].buffer;
+
++ b->type = type;
++ b->index = index;
++ dprintkrw("buffer type: %d (of %d with size=%ld)\n", b->memory,
++ dev->buffers_number, dev->buffer_size);
+
++ /* Hopefully fix 'DQBUF return bad index if queue bigger then 2 for capture' 
https://github.com/umlaeute/v4l2loopback/issues/60 */ ++ b->flags &= ~V4L2_BUF_FLAG_DONE; ++ b->flags |= V4L2_BUF_FLAG_QUEUED; + -+ dev->write_position = 0; ++ return 0; ++} + -+ MARK(); -+ spin_lock_init(&dev->lock); -+ spin_lock_init(&dev->list_lock); -+ INIT_LIST_HEAD(&dev->outbufs_list); -+ if (list_empty(&dev->outbufs_list)) { -+ int i; ++static void buffer_written(struct v4l2_loopback_device *dev, ++ struct v4l2l_buffer *buf) ++{ ++ del_timer_sync(&dev->sustain_timer); ++ del_timer_sync(&dev->timeout_timer); + -+ for (i = 0; i < dev->used_buffers; ++i) -+ list_add_tail(&dev->buffers[i].list_head, -+ &dev->outbufs_list); -+ } -+ memset(dev->bufpos2index, 0, sizeof(dev->bufpos2index)); -+ atomic_set(&dev->open_count, 0); -+ dev->ready_for_capture = 0; -+ dev->ready_for_output = 1; ++ spin_lock_bh(&dev->list_lock); ++ list_move_tail(&buf->list_head, &dev->outbufs_list); ++ spin_unlock_bh(&dev->list_lock); + -+ dev->buffer_size = 0; -+ dev->image = NULL; -+ dev->imagesize = 0; -+#ifdef HAVE_TIMER_SETUP -+ timer_setup(&dev->sustain_timer, sustain_timer_clb, 0); -+ timer_setup(&dev->timeout_timer, timeout_timer_clb, 0); -+#else -+ setup_timer(&dev->sustain_timer, sustain_timer_clb, nr); -+ setup_timer(&dev->timeout_timer, timeout_timer_clb, nr); -+#endif ++ spin_lock_bh(&dev->lock); ++ dev->bufpos2index[v4l2l_mod64(dev->write_position, dev->used_buffers)] = ++ buf->buffer.index; ++ ++dev->write_position; + dev->reread_count = 0; -+ dev->timeout_jiffies = 0; -+ dev->timeout_image = NULL; -+ dev->timeout_happened = 0; + -+ hdl = &dev->ctrl_handler; -+ err = v4l2_ctrl_handler_init(hdl, 4); -+ if (err) -+ goto out_unregister; -+ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_keepformat, NULL); -+ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_sustainframerate, NULL); -+ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_timeout, NULL); -+ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_timeoutimageio, NULL); -+ if (hdl->error) { -+ err = hdl->error; -+ goto out_free_handler; -+ } -+ dev->v4l2_dev.ctrl_handler = hdl; ++ check_timers(dev); ++ spin_unlock_bh(&dev->lock); ++} + -+ err = v4l2_ctrl_handler_setup(hdl); -+ if (err) -+ goto out_free_handler; ++/* put buffer to queue ++ * called on VIDIOC_QBUF ++ */ ++static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; ++ struct v4l2l_buffer *b; ++ int index; + -+ /* FIXME set buffers to 0 */ ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(fh); + -+ /* Set initial format */ -+ if (_width < _min_width) -+ _width = _min_width; -+ if (_width > _max_width) -+ _width = _max_width; -+ if (_height < _min_height) -+ _height = _min_height; -+ if (_height > _max_height) -+ _height = _max_height; ++ if (buf->index > max_buffers) ++ return -EINVAL; ++ if (opener->timeout_image_io) ++ return 0; + -+ dev->pix_format.width = _width; -+ dev->pix_format.height = _height; -+ dev->pix_format.pixelformat = formats[0].fourcc; -+ dev->pix_format.colorspace = -+ V4L2_COLORSPACE_DEFAULT; /* do we need to set this ? 
*/ -+ dev->pix_format.field = V4L2_FIELD_NONE; ++ index = buf->index % dev->used_buffers; ++ b = &dev->buffers[index]; + -+ dev->buffer_size = PAGE_ALIGN(dev->pix_format.sizeimage); -+ dprintk("buffer_size = %ld (=%d)\n", dev->buffer_size, -+ dev->pix_format.sizeimage); ++ switch (buf->type) { ++ case V4L2_BUF_TYPE_VIDEO_CAPTURE: ++ dprintkrw( ++ "qbuf(CAPTURE)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", ++ index, buf->index, buf, buf->type, buf->bytesused, ++ buf->length, buf->flags, buf->field, ++ (long long)buf->timestamp.tv_sec, ++ (long int)buf->timestamp.tv_usec, buf->sequence); ++ set_queued(b); ++ return 0; ++ case V4L2_BUF_TYPE_VIDEO_OUTPUT: ++ dprintkrw( ++ "qbuf(OUTPUT)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", ++ index, buf->index, buf, buf->type, buf->bytesused, ++ buf->length, buf->flags, buf->field, ++ (long long)buf->timestamp.tv_sec, ++ (long int)buf->timestamp.tv_usec, buf->sequence); ++ if ((!(b->buffer.flags & V4L2_BUF_FLAG_TIMESTAMP_COPY)) && ++ (buf->timestamp.tv_sec == 0 && buf->timestamp.tv_usec == 0)) ++ v4l2l_get_timestamp(&b->buffer); ++ else { ++ b->buffer.timestamp = buf->timestamp; ++ b->buffer.flags |= V4L2_BUF_FLAG_TIMESTAMP_COPY; ++ } ++ if (dev->pix_format_has_valid_sizeimage) { ++ if (buf->bytesused >= dev->pix_format.sizeimage) { ++ b->buffer.bytesused = dev->pix_format.sizeimage; ++ } else { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) ++ dev_warn_ratelimited( ++ &dev->vdev->dev, ++#else ++ dprintkrw( ++#endif ++ "warning queued output buffer bytesused too small %d < %d\n", ++ buf->bytesused, ++ dev->pix_format.sizeimage); ++ b->buffer.bytesused = buf->bytesused; ++ } ++ } else { ++ b->buffer.bytesused = buf->bytesused; ++ } + -+ if (dev->buffer_size && ((err = allocate_buffers(dev)) < 0)) -+ goto out_free_handler; ++ set_done(b); ++ buffer_written(dev, b); + -+ init_waitqueue_head(&dev->read_event); ++ /* Hopefully fix 'DQBUF return bad index if queue bigger then 2 for capture' ++ https://github.com/umlaeute/v4l2loopback/issues/60 */ ++ buf->flags &= ~V4L2_BUF_FLAG_DONE; ++ buf->flags |= V4L2_BUF_FLAG_QUEUED; + -+ /* register the device -> it creates /dev/video* */ -+ if (video_register_device(dev->vdev, VFL_TYPE_VIDEO, nr) < 0) { -+ printk(KERN_ERR -+ "v4l2loopback: failed video_register_device()\n"); -+ err = -EFAULT; -+ goto out_free_device; ++ wake_up_all(&dev->read_event); ++ return 0; ++ default: ++ return -EINVAL; + } -+ v4l2loopback_create_sysfs(dev->vdev); -+ -+ MARK(); -+ if (ret_nr) -+ *ret_nr = dev->vdev->num; -+ return 0; -+ -+out_free_device: -+ video_device_release(dev->vdev); -+out_free_handler: -+ v4l2_ctrl_handler_free(&dev->ctrl_handler); -+out_unregister: -+ video_set_drvdata(dev->vdev, NULL); -+ if (vdev_priv != NULL) -+ kfree(vdev_priv); -+ v4l2_device_unregister(&dev->v4l2_dev); -+out_free_idr: -+ idr_remove(&v4l2loopback_index_idr, nr); -+out_free_dev: -+ kfree(dev); -+ return err; +} + -+static void v4l2_loopback_remove(struct v4l2_loopback_device *dev) ++static int can_read(struct v4l2_loopback_device *dev, ++ struct v4l2_loopback_opener *opener) +{ -+ free_buffers(dev); -+ v4l2loopback_remove_sysfs(dev->vdev); -+ kfree(video_get_drvdata(dev->vdev)); -+ video_unregister_device(dev->vdev); -+ v4l2_device_unregister(&dev->v4l2_dev); -+ v4l2_ctrl_handler_free(&dev->ctrl_handler); -+ kfree(dev); ++ int ret; ++ ++ spin_lock_bh(&dev->lock); ++ check_timers(dev); ++ ret = dev->write_position > 
opener->read_position || ++ dev->reread_count > opener->reread_count || dev->timeout_happened; ++ spin_unlock_bh(&dev->lock); ++ return ret; +} + -+static long v4l2loopback_control_ioctl(struct file *file, unsigned int cmd, -+ unsigned long parm) ++static int get_capture_buffer(struct file *file) +{ -+ struct v4l2_loopback_device *dev; -+ struct v4l2_loopback_config conf; -+ struct v4l2_loopback_config *confptr = &conf; -+ int device_nr, capture_nr, output_nr; -+ int ret; ++ struct v4l2_loopback_device *dev = v4l2loopback_getdevice(file); ++ struct v4l2_loopback_opener *opener = fh_to_opener(file->private_data); ++ int pos, ret; ++ int timeout_happened; + -+ ret = mutex_lock_killable(&v4l2loopback_ctl_mutex); -+ if (ret) -+ return ret; ++ if ((file->f_flags & O_NONBLOCK) && ++ (dev->write_position <= opener->read_position && ++ dev->reread_count <= opener->reread_count && ++ !dev->timeout_happened)) ++ return -EAGAIN; ++ wait_event_interruptible(dev->read_event, can_read(dev, opener)); + -+ ret = -EINVAL; -+ switch (cmd) { -+ default: -+ ret = -ENOSYS; -+ break; -+ /* add a v4l2loopback device (pair), based on the user-provided specs */ -+ case V4L2LOOPBACK_CTL_ADD: -+ if (parm) { -+ if ((ret = copy_from_user(&conf, (void *)parm, -+ sizeof(conf))) < 0) -+ break; -+ } else -+ confptr = NULL; -+ ret = v4l2_loopback_add(confptr, &device_nr); -+ if (ret >= 0) -+ ret = device_nr; -+ break; -+ /* remove a v4l2loopback device (both capture and output) */ -+ case V4L2LOOPBACK_CTL_REMOVE: -+ ret = v4l2loopback_lookup((int)parm, &dev); -+ if (ret >= 0 && dev) { -+ int nr = ret; -+ ret = -EBUSY; -+ if (dev->open_count.counter > 0) -+ break; -+ idr_remove(&v4l2loopback_index_idr, nr); -+ v4l2_loopback_remove(dev); -+ ret = 0; -+ }; -+ break; -+ /* get information for a loopback device. -+ * this is mostly about limits (which cannot be queried directly with VIDIOC_G_FMT and friends -+ */ -+ case V4L2LOOPBACK_CTL_QUERY: -+ if (!parm) -+ break; -+ if ((ret = copy_from_user(&conf, (void *)parm, sizeof(conf))) < -+ 0) -+ break; -+ capture_nr = output_nr = conf.output_nr; -+#ifdef SPLIT_DEVICES -+ capture_nr = conf.capture_nr; -+#endif -+ device_nr = (output_nr < 0) ? 
capture_nr : output_nr; -+ MARK(); -+ /* get the device from either capture_nr or output_nr (whatever is valid) */ -+ if ((ret = v4l2loopback_lookup(device_nr, &dev)) < 0) -+ break; -+ MARK(); -+ /* if we got the device from output_nr and there is a valid capture_nr, -+ * make sure that both refer to the same device (or bail out) -+ */ -+ if ((device_nr != capture_nr) && (capture_nr >= 0) && -+ ((ret = v4l2loopback_lookup(capture_nr, 0)) < 0)) -+ break; -+ MARK(); -+ /* if otoh, we got the device from capture_nr and there is a valid output_nr, -+ * make sure that both refer to the same device (or bail out) -+ */ -+ if ((device_nr != output_nr) && (output_nr >= 0) && -+ ((ret = v4l2loopback_lookup(output_nr, 0)) < 0)) -+ break; -+ MARK(); ++ spin_lock_bh(&dev->lock); ++ if (dev->write_position == opener->read_position) { ++ if (dev->reread_count > opener->reread_count + 2) ++ opener->reread_count = dev->reread_count - 1; ++ ++opener->reread_count; ++ pos = v4l2l_mod64(opener->read_position + dev->used_buffers - 1, ++ dev->used_buffers); ++ } else { ++ opener->reread_count = 0; ++ if (dev->write_position > ++ opener->read_position + dev->used_buffers) ++ opener->read_position = dev->write_position - 1; ++ pos = v4l2l_mod64(opener->read_position, dev->used_buffers); ++ ++opener->read_position; ++ } ++ timeout_happened = dev->timeout_happened; ++ dev->timeout_happened = 0; ++ spin_unlock_bh(&dev->lock); + -+ /* v4l2_loopback_config identified a single device, so fetch the data */ -+ snprintf(conf.card_label, sizeof(conf.card_label), "%s", -+ dev->card_label); -+ MARK(); -+ conf.output_nr = dev->vdev->num; -+#ifdef SPLIT_DEVICES -+ conf.capture_nr = dev->vdev->num; -+#endif -+ conf.min_width = dev->min_width; -+ conf.min_height = dev->min_height; -+ conf.max_width = dev->max_width; -+ conf.max_height = dev->max_height; -+ conf.announce_all_caps = dev->announce_all_caps; -+ conf.max_buffers = dev->buffers_number; -+ conf.max_openers = dev->max_openers; -+ conf.debug = debug; -+ MARK(); -+ if (copy_to_user((void *)parm, &conf, sizeof(conf))) { -+ ret = -EFAULT; -+ break; ++ ret = dev->bufpos2index[pos]; ++ if (timeout_happened) { ++ if (ret < 0) { ++ dprintk("trying to return not mapped buf[%d]\n", ret); ++ return -EFAULT; + } -+ MARK(); -+ ret = 0; -+ ; -+ break; ++ /* although allocated on-demand, timeout_image is freed only ++ * in free_buffers(), so we don't need to worry about it being ++ * deallocated suddenly */ ++ memcpy(dev->image + dev->buffers[ret].buffer.m.offset, ++ dev->timeout_image, dev->buffer_size); + } -+ -+ MARK(); -+ mutex_unlock(&v4l2loopback_ctl_mutex); -+ MARK(); + return ret; +} + -+/* LINUX KERNEL */ -+ -+static const struct file_operations v4l2loopback_ctl_fops = { -+ // clang-format off -+ .owner = THIS_MODULE, -+ .open = nonseekable_open, -+ .unlocked_ioctl = v4l2loopback_control_ioctl, -+ .compat_ioctl = v4l2loopback_control_ioctl, -+ .llseek = noop_llseek, -+ // clang-format on -+}; -+ -+static struct miscdevice v4l2loopback_misc = { -+ // clang-format off -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "v4l2loopback", -+ .fops = &v4l2loopback_ctl_fops, -+ // clang-format on -+}; -+ -+static const struct v4l2_file_operations v4l2_loopback_fops = { -+ // clang-format off -+ .owner = THIS_MODULE, -+ .open = v4l2_loopback_open, -+ .release = v4l2_loopback_close, -+ .read = v4l2_loopback_read, -+ .write = v4l2_loopback_write, -+ .poll = v4l2_loopback_poll, -+ .mmap = v4l2_loopback_mmap, -+ .unlocked_ioctl = video_ioctl2, -+ // clang-format on -+}; -+ -+static const struct 
v4l2_ioctl_ops v4l2_loopback_ioctl_ops = { -+ // clang-format off -+ .vidioc_querycap = &vidioc_querycap, -+ .vidioc_enum_framesizes = &vidioc_enum_framesizes, -+ .vidioc_enum_frameintervals = &vidioc_enum_frameintervals, ++/* put buffer to dequeue ++ * called on VIDIOC_DQBUF ++ */ ++static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; ++ int index; ++ struct v4l2l_buffer *b; + -+ .vidioc_enum_output = &vidioc_enum_output, -+ .vidioc_g_output = &vidioc_g_output, -+ .vidioc_s_output = &vidioc_s_output, ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(fh); ++ if (opener->timeout_image_io) { ++ *buf = dev->timeout_image_buffer.buffer; ++ return 0; ++ } + -+ .vidioc_enum_input = &vidioc_enum_input, -+ .vidioc_g_input = &vidioc_g_input, -+ .vidioc_s_input = &vidioc_s_input, ++ switch (buf->type) { ++ case V4L2_BUF_TYPE_VIDEO_CAPTURE: ++ index = get_capture_buffer(file); ++ if (index < 0) ++ return index; ++ dprintkrw("capture DQBUF pos: %lld index: %d\n", ++ (long long)(opener->read_position - 1), index); ++ if (!(dev->buffers[index].buffer.flags & ++ V4L2_BUF_FLAG_MAPPED)) { ++ dprintk("trying to return not mapped buf[%d]\n", index); ++ return -EINVAL; ++ } ++ unset_flags(&dev->buffers[index]); ++ *buf = dev->buffers[index].buffer; ++ dprintkrw( ++ "dqbuf(CAPTURE)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", ++ index, buf->index, buf, buf->type, buf->bytesused, ++ buf->length, buf->flags, buf->field, ++ (long long)buf->timestamp.tv_sec, ++ (long int)buf->timestamp.tv_usec, buf->sequence); ++ return 0; ++ case V4L2_BUF_TYPE_VIDEO_OUTPUT: ++ spin_lock_bh(&dev->list_lock); + -+ .vidioc_enum_fmt_vid_cap = &vidioc_enum_fmt_cap, -+ .vidioc_g_fmt_vid_cap = &vidioc_g_fmt_cap, -+ .vidioc_s_fmt_vid_cap = &vidioc_s_fmt_cap, -+ .vidioc_try_fmt_vid_cap = &vidioc_try_fmt_cap, ++ b = list_entry(dev->outbufs_list.prev, struct v4l2l_buffer, ++ list_head); ++ list_move_tail(&b->list_head, &dev->outbufs_list); + -+ .vidioc_enum_fmt_vid_out = &vidioc_enum_fmt_out, -+ .vidioc_s_fmt_vid_out = &vidioc_s_fmt_out, -+ .vidioc_g_fmt_vid_out = &vidioc_g_fmt_out, -+ .vidioc_try_fmt_vid_out = &vidioc_try_fmt_out, ++ spin_unlock_bh(&dev->list_lock); ++ dprintkrw("output DQBUF index: %d\n", b->buffer.index); ++ unset_flags(b); ++ *buf = b->buffer; ++ buf->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ++ dprintkrw( ++ "dqbuf(OUTPUT)#%d: buffer#%d @ %p type=%d bytesused=%d length=%d flags=%x field=%d timestamp=%lld.%06ld sequence=%d\n", ++ index, buf->index, buf, buf->type, buf->bytesused, ++ buf->length, buf->flags, buf->field, ++ (long long)buf->timestamp.tv_sec, ++ (long int)buf->timestamp.tv_usec, buf->sequence); ++ return 0; ++ default: ++ return -EINVAL; ++ } ++} + -+#ifdef V4L2L_OVERLAY -+ .vidioc_s_fmt_vid_overlay = &vidioc_s_fmt_overlay, -+ .vidioc_g_fmt_vid_overlay = &vidioc_g_fmt_overlay, -+#endif ++/* ------------- STREAMING ------------------- */ + -+#ifdef V4L2LOOPBACK_WITH_STD -+ .vidioc_s_std = &vidioc_s_std, -+ .vidioc_g_std = &vidioc_g_std, -+ .vidioc_querystd = &vidioc_querystd, -+#endif /* V4L2LOOPBACK_WITH_STD */ ++/* start streaming ++ * called on VIDIOC_STREAMON ++ */ ++static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type type) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; ++ MARK(); + -+ .vidioc_g_parm = &vidioc_g_parm, -+ .vidioc_s_parm = &vidioc_s_parm, ++ dev = 
v4l2loopback_getdevice(file); ++ opener = fh_to_opener(fh); + -+ .vidioc_reqbufs = &vidioc_reqbufs, -+ .vidioc_querybuf = &vidioc_querybuf, -+ .vidioc_qbuf = &vidioc_qbuf, -+ .vidioc_dqbuf = &vidioc_dqbuf, ++ switch (type) { ++ case V4L2_BUF_TYPE_VIDEO_OUTPUT: ++ if (!dev->ready_for_capture) { ++ int ret = allocate_buffers(dev); ++ if (ret < 0) ++ return ret; ++ } ++ opener->type = WRITER; ++ dev->ready_for_output = 0; ++ dev->ready_for_capture++; ++ return 0; ++ case V4L2_BUF_TYPE_VIDEO_CAPTURE: ++ if (!dev->ready_for_capture) ++ return -EIO; ++ if (dev->active_readers > 0) ++ return -EBUSY; ++ opener->type = READER; ++ dev->active_readers++; ++ client_usage_queue_event(dev->vdev); ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ return -EINVAL; ++} + -+ .vidioc_streamon = &vidioc_streamon, -+ .vidioc_streamoff = &vidioc_streamoff, ++/* stop streaming ++ * called on VIDIOC_STREAMOFF ++ */ ++static int vidioc_streamoff(struct file *file, void *fh, ++ enum v4l2_buf_type type) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; + -+#ifdef CONFIG_VIDEO_V4L1_COMPAT -+ .vidiocgmbuf = &vidiocgmbuf, -+#endif ++ MARK(); ++ dprintk("%d\n", type); + -+ .vidioc_subscribe_event = &vidioc_subscribe_event, -+ .vidioc_unsubscribe_event = &v4l2_event_unsubscribe, -+ // clang-format on -+}; ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(fh); ++ switch (type) { ++ case V4L2_BUF_TYPE_VIDEO_OUTPUT: ++ if (dev->ready_for_capture > 0) ++ dev->ready_for_capture--; ++ return 0; ++ case V4L2_BUF_TYPE_VIDEO_CAPTURE: ++ if (opener->type == READER) { ++ opener->type = 0; ++ dev->active_readers--; ++ client_usage_queue_event(dev->vdev); ++ } ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ return -EINVAL; ++} + -+static int free_device_cb(int id, void *ptr, void *data) ++#ifdef CONFIG_VIDEO_V4L1_COMPAT ++static int vidiocgmbuf(struct file *file, void *fh, struct video_mbuf *p) +{ -+ struct v4l2_loopback_device *dev = ptr; -+ v4l2_loopback_remove(dev); ++ struct v4l2_loopback_device *dev; ++ MARK(); ++ ++ dev = v4l2loopback_getdevice(file); ++ p->frames = dev->buffers_number; ++ p->offsets[0] = 0; ++ p->offsets[1] = 0; ++ p->size = dev->buffer_size; + return 0; +} -+static void free_devices(void) ++#endif ++ ++static void client_usage_queue_event(struct video_device *vdev) +{ -+ idr_for_each(&v4l2loopback_index_idr, &free_device_cb, NULL); -+ idr_destroy(&v4l2loopback_index_idr); ++ struct v4l2_event ev; ++ struct v4l2_loopback_device *dev; ++ ++ dev = container_of(vdev->v4l2_dev, struct v4l2_loopback_device, ++ v4l2_dev); ++ ++ memset(&ev, 0, sizeof(ev)); ++ ev.type = V4L2_EVENT_PRI_CLIENT_USAGE; ++ ((struct v4l2_event_client_usage *)&ev.u)->count = dev->active_readers; ++ ++ v4l2_event_queue(vdev, &ev); +} + -+static int __init v4l2loopback_init_module(void) ++static int client_usage_ops_add(struct v4l2_subscribed_event *sev, ++ unsigned elems) +{ -+ const u32 min_width = V4L2LOOPBACK_SIZE_MIN_WIDTH; -+ const u32 min_height = V4L2LOOPBACK_SIZE_MIN_HEIGHT; -+ int err; -+ int i; -+ MARK(); ++ if (!(sev->flags & V4L2_EVENT_SUB_FL_SEND_INITIAL)) ++ return 0; + -+ err = misc_register(&v4l2loopback_misc); -+ if (err < 0) -+ return err; ++ client_usage_queue_event(sev->fh->vdev); ++ return 0; ++} + -+ if (devices < 0) { -+ devices = 1; ++static void client_usage_ops_replace(struct v4l2_event *old, ++ const struct v4l2_event *new) ++{ ++ *((struct v4l2_event_client_usage *)&old->u) = ++ *((struct v4l2_event_client_usage *)&new->u); ++} + -+ /* try guessing the devices from 
the "video_nr" parameter */ -+ for (i = MAX_DEVICES - 1; i >= 0; i--) { -+ if (video_nr[i] >= 0) { -+ devices = i + 1; -+ break; -+ } -+ } -+ } ++static void client_usage_ops_merge(const struct v4l2_event *old, ++ struct v4l2_event *new) ++{ ++ *((struct v4l2_event_client_usage *)&new->u) = ++ *((struct v4l2_event_client_usage *)&old->u); ++} + -+ if (devices > MAX_DEVICES) { -+ devices = MAX_DEVICES; -+ printk(KERN_INFO -+ "v4l2loopback: number of initial devices is limited to: %d\n", -+ MAX_DEVICES); -+ } ++const struct v4l2_subscribed_event_ops client_usage_ops = { ++ .add = client_usage_ops_add, ++ .replace = client_usage_ops_replace, ++ .merge = client_usage_ops_merge, ++}; + -+ if (max_buffers > MAX_BUFFERS) { -+ max_buffers = MAX_BUFFERS; -+ printk(KERN_INFO -+ "v4l2loopback: number of buffers is limited to: %d\n", -+ MAX_BUFFERS); ++static int vidioc_subscribe_event(struct v4l2_fh *fh, ++ const struct v4l2_event_subscription *sub) ++{ ++ switch (sub->type) { ++ case V4L2_EVENT_CTRL: ++ return v4l2_ctrl_subscribe_event(fh, sub); ++ case V4L2_EVENT_PRI_CLIENT_USAGE: ++ return v4l2_event_subscribe(fh, sub, 0, &client_usage_ops); + } + -+ if (max_openers < 0) { -+ printk(KERN_INFO -+ "v4l2loopback: allowing %d openers rather than %d\n", -+ 2, max_openers); -+ max_openers = 2; -+ } ++ return -EINVAL; ++} + -+ if (max_width < min_width) { -+ max_width = V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH; -+ printk(KERN_INFO "v4l2loopback: using max_width %d\n", -+ max_width); ++/* file operations */ ++static void vm_open(struct vm_area_struct *vma) ++{ ++ struct v4l2l_buffer *buf; ++ MARK(); ++ ++ buf = vma->vm_private_data; ++ buf->use_count++; ++ ++ buf->buffer.flags |= V4L2_BUF_FLAG_MAPPED; ++} ++ ++static void vm_close(struct vm_area_struct *vma) ++{ ++ struct v4l2l_buffer *buf; ++ MARK(); ++ ++ buf = vma->vm_private_data; ++ buf->use_count--; ++ ++ if (buf->use_count <= 0) ++ buf->buffer.flags &= ~V4L2_BUF_FLAG_MAPPED; ++} ++ ++static struct vm_operations_struct vm_ops = { ++ .open = vm_open, ++ .close = vm_close, ++}; ++ ++static int v4l2_loopback_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ u8 *addr; ++ unsigned long start; ++ unsigned long size; ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; ++ struct v4l2l_buffer *buffer = NULL; ++ MARK(); ++ ++ start = (unsigned long)vma->vm_start; ++ size = (unsigned long)(vma->vm_end - vma->vm_start); ++ ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(file->private_data); ++ ++ if (size > dev->buffer_size) { ++ dprintk("userspace tries to mmap too much, fail\n"); ++ return -EINVAL; + } -+ if (max_height < min_height) { -+ max_height = V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT; -+ printk(KERN_INFO "v4l2loopback: using max_height %d\n", -+ max_height); ++ if (opener->timeout_image_io) { ++ /* we are going to map the timeout_image_buffer */ ++ if ((vma->vm_pgoff << PAGE_SHIFT) != ++ dev->buffer_size * MAX_BUFFERS) { ++ dprintk("invalid mmap offset for timeout_image_io mode\n"); ++ return -EINVAL; ++ } ++ } else if ((vma->vm_pgoff << PAGE_SHIFT) > ++ dev->buffer_size * (dev->buffers_number - 1)) { ++ dprintk("userspace tries to mmap too far, fail\n"); ++ return -EINVAL; + } + -+ for (i = 0; i < devices; i++) { -+ struct v4l2_loopback_config cfg = { -+ // clang-format off -+ .output_nr = video_nr[i], -+#ifdef SPLIT_DEVICES -+ .capture_nr = video_nr[i], -+#endif -+ .min_width = min_width, -+ .min_height = min_height, -+ .max_width = max_width, -+ .max_height = max_height, -+ .announce_all_caps = 
(!exclusive_caps[i]), -+ .max_buffers = max_buffers, -+ .max_openers = max_openers, -+ .debug = debug, -+ // clang-format on -+ }; -+ cfg.card_label[0] = 0; -+ if (card_label[i]) -+ snprintf(cfg.card_label, sizeof(cfg.card_label), "%s", -+ card_label[i]); -+ err = v4l2_loopback_add(&cfg, 0); -+ if (err) { -+ free_devices(); -+ goto error; ++ /* FIXXXXXME: allocation should not happen here! */ ++ if (NULL == dev->image) ++ if (allocate_buffers(dev) < 0) ++ return -EINVAL; ++ ++ if (opener->timeout_image_io) { ++ buffer = &dev->timeout_image_buffer; ++ addr = dev->timeout_image; ++ } else { ++ int i; ++ for (i = 0; i < dev->buffers_number; ++i) { ++ buffer = &dev->buffers[i]; ++ if ((buffer->buffer.m.offset >> PAGE_SHIFT) == ++ vma->vm_pgoff) ++ break; + } ++ ++ if (i >= dev->buffers_number) ++ return -EINVAL; ++ ++ addr = dev->image + (vma->vm_pgoff << PAGE_SHIFT); + } + -+ dprintk("module installed\n"); ++ while (size > 0) { ++ struct page *page; + -+ printk(KERN_INFO "v4l2loopback driver version %d.%d.%d%s loaded\n", -+ // clang-format off -+ (V4L2LOOPBACK_VERSION_CODE >> 16) & 0xff, -+ (V4L2LOOPBACK_VERSION_CODE >> 8) & 0xff, -+ (V4L2LOOPBACK_VERSION_CODE ) & 0xff, -+#ifdef SNAPSHOT_VERSION -+ " (" __stringify(SNAPSHOT_VERSION) ")" -+#else -+ "" -+#endif -+ ); -+ // clang-format on ++ page = vmalloc_to_page(addr); ++ ++ if (vm_insert_page(vma, start, page) < 0) ++ return -EAGAIN; ++ ++ start += PAGE_SIZE; ++ addr += PAGE_SIZE; ++ size -= PAGE_SIZE; ++ } + ++ vma->vm_ops = &vm_ops; ++ vma->vm_private_data = buffer; ++ ++ vm_open(vma); ++ ++ MARK(); + return 0; -+error: -+ misc_deregister(&v4l2loopback_misc); -+ return err; +} + -+static void v4l2loopback_cleanup_module(void) ++static unsigned int v4l2_loopback_poll(struct file *file, ++ struct poll_table_struct *pts) +{ ++ struct v4l2_loopback_opener *opener; ++ struct v4l2_loopback_device *dev; ++ __poll_t req_events = poll_requested_events(pts); ++ int ret_mask = 0; + MARK(); -+ /* unregister the device -> it deletes /dev/video* */ -+ free_devices(); -+ /* and get rid of /dev/v4l2loopback */ -+ misc_deregister(&v4l2loopback_misc); -+ dprintk("module removed\n"); ++ ++ opener = fh_to_opener(file->private_data); ++ dev = v4l2loopback_getdevice(file); ++ ++ if (req_events & POLLPRI) { ++ if (!v4l2_event_pending(&opener->fh)) ++ poll_wait(file, &opener->fh.wait, pts); ++ if (v4l2_event_pending(&opener->fh)) { ++ ret_mask |= POLLPRI; ++ if (!(req_events & DEFAULT_POLLMASK)) ++ return ret_mask; ++ } ++ } ++ ++ switch (opener->type) { ++ case WRITER: ++ ret_mask |= POLLOUT | POLLWRNORM; ++ break; ++ case READER: ++ if (!can_read(dev, opener)) { ++ if (ret_mask) ++ return ret_mask; ++ poll_wait(file, &dev->read_event, pts); ++ } ++ if (can_read(dev, opener)) ++ ret_mask |= POLLIN | POLLRDNORM; ++ if (v4l2_event_pending(&opener->fh)) ++ ret_mask |= POLLPRI; ++ break; ++ default: ++ break; ++ } ++ ++ MARK(); ++ return ret_mask; +} + -+MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR); ++/* do not want to limit device opens, it can be as many readers as user want, ++ * writers are limited by means of setting writer field */ ++static int v4l2_loopback_open(struct file *file) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_opener *opener; ++ MARK(); ++ dev = v4l2loopback_getdevice(file); ++ if (dev->open_count.counter >= dev->max_openers) ++ return -EBUSY; ++ /* kfree on close */ ++ opener = kzalloc(sizeof(*opener), GFP_KERNEL); ++ if (opener == NULL) ++ return -ENOMEM; + -+module_init(v4l2loopback_init_module); 
-+module_exit(v4l2loopback_cleanup_module); -diff --git a/drivers/media/v4l2-core/v4l2loopback.h b/drivers/media/v4l2-core/v4l2loopback.h -new file mode 100644 -index 000000000000..1bc7e6b747a4 ---- /dev/null -+++ b/drivers/media/v4l2-core/v4l2loopback.h -@@ -0,0 +1,98 @@ -+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ -+/* -+ * v4l2loopback.h -+ * -+ * Written by IOhannes m zmölnig, 7/1/20. -+ * -+ * Copyright 2020 by IOhannes m zmölnig. Redistribution of this file is -+ * permitted under the GNU General Public License. -+ */ -+#ifndef _V4L2LOOPBACK_H -+#define _V4L2LOOPBACK_H ++ atomic_inc(&dev->open_count); + -+#define V4L2LOOPBACK_VERSION_MAJOR 0 -+#define V4L2LOOPBACK_VERSION_MINOR 13 -+#define V4L2LOOPBACK_VERSION_BUGFIX 1 ++ opener->timeout_image_io = dev->timeout_image_io; ++ if (opener->timeout_image_io) { ++ int r = allocate_timeout_image(dev); + -+/* /dev/v4l2loopback interface */ ++ if (r < 0) { ++ dprintk("timeout image allocation failed\n"); + -+struct v4l2_loopback_config { -+ /** -+ * the device-number (/dev/video) -+ * V4L2LOOPBACK_CTL_ADD: -+ * setting this to a value<0, will allocate an available one -+ * if nr>=0 and the device already exists, the ioctl will EEXIST -+ * if output_nr and capture_nr are the same, only a single device will be created -+ * NOTE: currently split-devices (where output_nr and capture_nr differ) -+ * are not implemented yet. -+ * until then, requesting different device-IDs will result in EINVAL. -+ * -+ * V4L2LOOPBACK_CTL_QUERY: -+ * either both output_nr and capture_nr must refer to the same loopback, -+ * or one (and only one) of them must be -1 -+ * -+ */ -+ int output_nr; -+ int unused; /*capture_nr;*/ ++ atomic_dec(&dev->open_count); + -+ /** -+ * a nice name for your device -+ * if (*card_label)==0, an automatic name is assigned -+ */ -+ char card_label[32]; ++ kfree(opener); ++ return r; ++ } ++ } + -+ /** -+ * allowed frame size -+ * if too low, default values are used -+ */ -+ unsigned int min_width; -+ unsigned int max_width; -+ unsigned int min_height; -+ unsigned int max_height; ++ v4l2_fh_init(&opener->fh, video_devdata(file)); ++ file->private_data = &opener->fh; + -+ /** -+ * number of buffers to allocate for the queue -+ * if set to <=0, default values are used -+ */ -+ int max_buffers; ++ v4l2_fh_add(&opener->fh); ++ dprintk("opened dev:%p with image:%p\n", dev, dev ? dev->image : NULL); ++ MARK(); ++ return 0; ++} + -+ /** -+ * how many consumers are allowed to open this device concurrently -+ * if set to <=0, default values are used -+ */ -+ int max_openers; ++static int v4l2_loopback_close(struct file *file) ++{ ++ struct v4l2_loopback_opener *opener; ++ struct v4l2_loopback_device *dev; ++ int is_writer = 0, is_reader = 0; ++ MARK(); + -+ /** -+ * set the debugging level for this device -+ */ -+ int debug; ++ opener = fh_to_opener(file->private_data); ++ dev = v4l2loopback_getdevice(file); + -+ /** -+ * whether to announce OUTPUT/CAPTURE capabilities exclusively -+ * for this device or not -+ * (!exclusive_caps) -+ * NOTE: this is going to be removed once separate output/capture -+ * devices are implemented -+ */ -+ int announce_all_caps; -+}; ++ if (WRITER == opener->type) ++ is_writer = 1; ++ if (READER == opener->type) ++ is_reader = 1; + -+/* a pointer to a (struct v4l2_loopback_config) that has all values you wish to impose on the -+ * to-be-created device set. -+ * if the ptr is NULL, a new device is created with default values at the driver's discretion. 
-+ * -+ * returns the device_nr of the OUTPUT device (which can be used with V4L2LOOPBACK_CTL_QUERY, -+ * to get more information on the device) -+ */ -+#define V4L2LOOPBACK_CTL_ADD 0x4C80 ++ atomic_dec(&dev->open_count); ++ if (dev->open_count.counter == 0) { ++ del_timer_sync(&dev->sustain_timer); ++ del_timer_sync(&dev->timeout_timer); ++ } ++ try_free_buffers(dev); + -+/* a pointer to a (struct v4l2_loopback_config) that has output_nr and/or capture_nr set -+ * (the two values must either refer to video-devices associated with the same loopback device -+ * or exactly one of them must be <0 -+ */ -+#define V4L2LOOPBACK_CTL_QUERY 0x4C82 ++ v4l2_fh_del(&opener->fh); ++ v4l2_fh_exit(&opener->fh); + -+/* the device-number (either CAPTURE or OUTPUT) associated with the loopback-device */ -+#define V4L2LOOPBACK_CTL_REMOVE 0x4C81 ++ kfree(opener); ++ if (is_writer) ++ dev->ready_for_output = 1; ++ if (is_reader) { ++ dev->active_readers--; ++ client_usage_queue_event(dev->vdev); ++ } ++ MARK(); ++ return 0; ++} + -+#endif /* _V4L2LOOPBACK_H */ -diff --git a/drivers/media/v4l2-core/v4l2loopback_formats.h b/drivers/media/v4l2-core/v4l2loopback_formats.h -new file mode 100644 -index 000000000000..d855a3796554 ---- /dev/null -+++ b/drivers/media/v4l2-core/v4l2loopback_formats.h -@@ -0,0 +1,445 @@ -+static const struct v4l2l_format formats[] = { -+#ifndef V4L2_PIX_FMT_VP9 -+#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') -+#endif -+#ifndef V4L2_PIX_FMT_HEVC -+#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') -+#endif ++static ssize_t v4l2_loopback_read(struct file *file, char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ int read_index; ++ struct v4l2_loopback_device *dev; ++ struct v4l2_buffer *b; ++ MARK(); + -+ /* here come the packed formats */ -+ { -+ .name = "32 bpp RGB, le", -+ .fourcc = V4L2_PIX_FMT_BGR32, -+ .depth = 32, -+ .flags = 0, -+ }, -+ { -+ .name = "32 bpp RGB, be", -+ .fourcc = V4L2_PIX_FMT_RGB32, -+ .depth = 32, -+ .flags = 0, -+ }, -+ { -+ .name = "24 bpp RGB, le", -+ .fourcc = V4L2_PIX_FMT_BGR24, -+ .depth = 24, -+ .flags = 0, -+ }, -+ { -+ .name = "24 bpp RGB, be", -+ .fourcc = V4L2_PIX_FMT_RGB24, -+ .depth = 24, -+ .flags = 0, -+ }, -+#ifdef V4L2_PIX_FMT_ABGR32 -+ { -+ .name = "32 bpp RGBA, le", -+ .fourcc = V4L2_PIX_FMT_ABGR32, -+ .depth = 32, -+ .flags = 0, -+ }, ++ dev = v4l2loopback_getdevice(file); ++ ++ read_index = get_capture_buffer(file); ++ if (read_index < 0) ++ return read_index; ++ if (count > dev->buffer_size) ++ count = dev->buffer_size; ++ b = &dev->buffers[read_index].buffer; ++ if (count > b->bytesused) ++ count = b->bytesused; ++ if (copy_to_user((void *)buf, (void *)(dev->image + b->m.offset), ++ count)) { ++ printk(KERN_ERR ++ "v4l2-loopback: failed copy_to_user() in read buf\n"); ++ return -EFAULT; ++ } ++ dprintkrw("leave v4l2_loopback_read()\n"); ++ return count; ++} ++ ++static ssize_t v4l2_loopback_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ struct v4l2_loopback_opener *opener; ++ struct v4l2_loopback_device *dev; ++ int write_index; ++ struct v4l2_buffer *b; ++ int err = 0; ++ ++ MARK(); ++ ++ dev = v4l2loopback_getdevice(file); ++ opener = fh_to_opener(file->private_data); ++ ++ if (UNNEGOTIATED == opener->type) { ++ spin_lock(&dev->lock); ++ ++ if (dev->ready_for_output) { ++ err = vidioc_streamon(file, file->private_data, ++ V4L2_BUF_TYPE_VIDEO_OUTPUT); ++ } ++ ++ spin_unlock(&dev->lock); ++ ++ if (err < 0) ++ return err; ++ } ++ ++ if (WRITER != opener->type) ++ return -EINVAL; ++ 
++ if (!dev->ready_for_capture) {
++ int ret = allocate_buffers(dev);
++ if (ret < 0)
++ return ret;
++ dev->ready_for_capture = 1;
++ }
++ dprintkrw("v4l2_loopback_write() trying to write %zu bytes\n", count);
++ if (count > dev->buffer_size)
++ count = dev->buffer_size;
++
++ write_index = v4l2l_mod64(dev->write_position, dev->used_buffers);
++ b = &dev->buffers[write_index].buffer;
++
++ if (copy_from_user((void *)(dev->image + b->m.offset), (void *)buf,
++ count)) {
++ printk(KERN_ERR
++ "v4l2-loopback: failed copy_from_user() in write buf, could not write %zu\n",
++ count);
++ return -EFAULT;
++ }
++ v4l2l_get_timestamp(b);
++ b->bytesused = count;
++ b->sequence = dev->write_position;
++ buffer_written(dev, &dev->buffers[write_index]);
++ wake_up_all(&dev->read_event);
++ dprintkrw("leave v4l2_loopback_write()\n");
++ return count;
++}
++
++/* init functions */
++/* frees buffers, if already allocated */
++static void free_buffers(struct v4l2_loopback_device *dev)
++{
++ MARK();
++ dprintk("freeing image@%p for dev:%p\n", dev ? dev->image : NULL, dev);
++ if (!dev)
++ return;
++ if (dev->image) {
++ vfree(dev->image);
++ dev->image = NULL;
++ }
++ if (dev->timeout_image) {
++ vfree(dev->timeout_image);
++ dev->timeout_image = NULL;
++ }
++ dev->imagesize = 0;
++}
++/* frees buffers, if they are no longer needed */
++static void try_free_buffers(struct v4l2_loopback_device *dev)
++{
++ MARK();
++ if (0 == dev->open_count.counter && !dev->keep_format) {
++ free_buffers(dev);
++ dev->ready_for_capture = 0;
++ dev->buffer_size = 0;
++ dev->write_position = 0;
++ }
++}
++/* allocates buffers, if buffer_size is set */
++static int allocate_buffers(struct v4l2_loopback_device *dev)
++{
++ int err;
++
++ MARK();
++ /* buffers are vfree()d by the close() file operation once no open handles are left */
++
++ if (dev->buffer_size < 1 || dev->buffers_number < 1)
++ return -EINVAL;
++
++ if ((__LONG_MAX__ / dev->buffer_size) < dev->buffers_number)
++ return -ENOSPC;
++
++ if (dev->image) {
++ dprintk("allocating buffers again: %ld %ld\n",
++ dev->buffer_size * dev->buffers_number, dev->imagesize);
++ /* FIXME: prevent double allocation more intelligently! 
*/
++ if (dev->buffer_size * dev->buffers_number == dev->imagesize)
++ return 0;
++
++ /* check whether the total number of readers/writers is <=1 */
++ if ((dev->ready_for_capture + dev->active_readers) <= 1)
++ free_buffers(dev);
++ else
++ return -EINVAL;
++ }
++
++ dev->imagesize = (unsigned long)dev->buffer_size *
++ (unsigned long)dev->buffers_number;
++
++ dprintk("allocating %ld = %ldx%d\n", dev->imagesize, dev->buffer_size,
++ dev->buffers_number);
++ err = -ENOMEM;
++
++ if (dev->timeout_jiffies > 0) {
++ err = allocate_timeout_image(dev);
++ if (err < 0)
++ goto error;
++ }
++
++ dev->image = vmalloc(dev->imagesize);
++ if (dev->image == NULL)
++ goto error;
++
++ dprintk("vmallocated %ld bytes\n", dev->imagesize);
++ MARK();
++
++ init_buffers(dev);
++ return 0;
++
++error:
++ free_buffers(dev);
++ return err;
++}
++
++/* init inner buffers, they are capture mode and flags are set as
++ * for capture mode buffers */
++static void init_buffers(struct v4l2_loopback_device *dev)
++{
++ int i;
++ int buffer_size;
++ int bytesused;
++ MARK();
++
++ buffer_size = dev->buffer_size;
++ bytesused = dev->pix_format.sizeimage;
++ for (i = 0; i < dev->buffers_number; ++i) {
++ struct v4l2_buffer *b = &dev->buffers[i].buffer;
++ b->index = i;
++ b->bytesused = bytesused;
++ b->length = buffer_size;
++ b->field = V4L2_FIELD_NONE;
++ b->flags = 0;
++ b->m.offset = i * buffer_size;
++ b->memory = V4L2_MEMORY_MMAP;
++ b->sequence = 0;
++ b->timestamp.tv_sec = 0;
++ b->timestamp.tv_usec = 0;
++ b->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
++
++ v4l2l_get_timestamp(b);
++ }
++ dev->timeout_image_buffer = dev->buffers[0];
++ dev->timeout_image_buffer.buffer.m.offset = MAX_BUFFERS * buffer_size;
++ MARK();
++}
++
++static int allocate_timeout_image(struct v4l2_loopback_device *dev)
++{
++ MARK();
++ if (dev->buffer_size <= 0) {
++ dev->timeout_image_io = 0;
++ return -EINVAL;
++ }
++
++ if (dev->timeout_image == NULL) {
++ dev->timeout_image = vzalloc(dev->buffer_size);
++ if (dev->timeout_image == NULL) {
++ dev->timeout_image_io = 0;
++ return -ENOMEM;
++ }
++ }
++ return 0;
++}
++
++/* fills and registers the video device */
++static void init_vdev(struct video_device *vdev, int nr)
++{
++ MARK();
++
++#ifdef V4L2LOOPBACK_WITH_STD
++ vdev->tvnorms = V4L2_STD_ALL;
++#endif /* V4L2LOOPBACK_WITH_STD */
++
++ vdev->vfl_type = VFL_TYPE_VIDEO;
++ vdev->fops = &v4l2_loopback_fops;
++ vdev->ioctl_ops = &v4l2_loopback_ioctl_ops;
++ vdev->release = &video_device_release;
++ vdev->minor = -1;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
++ vdev->device_caps = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_VIDEO_CAPTURE |
++ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
++ V4L2_CAP_STREAMING;
+#endif
++
++ if (debug > 1)
++ vdev->dev_debug = V4L2_DEV_DEBUG_IOCTL |
++ V4L2_DEV_DEBUG_IOCTL_ARG;
++
++ vdev->vfl_dir = VFL_DIR_M2M;
++
++ MARK();
++}
++
++/* init default capture parameters, only fps may be changed in the future */
++static void init_capture_param(struct v4l2_captureparm *capture_param)
++{
++ MARK();
++ capture_param->capability = 0;
++ capture_param->capturemode = 0;
++ capture_param->extendedmode = 0;
++ capture_param->readbuffers = max_buffers;
++ capture_param->timeperframe.numerator = 1;
++ capture_param->timeperframe.denominator = 30;
++}
++
++static void check_timers(struct v4l2_loopback_device *dev)
++{
++ if (!dev->ready_for_capture)
++ return;
++
++ if (dev->timeout_jiffies > 0 && 
!timer_pending(&dev->timeout_timer)) ++ mod_timer(&dev->timeout_timer, jiffies + dev->timeout_jiffies); ++ if (dev->sustain_framerate && !timer_pending(&dev->sustain_timer)) ++ mod_timer(&dev->sustain_timer, ++ jiffies + dev->frame_jiffies * 3 / 2); ++} ++#ifdef HAVE_TIMER_SETUP ++static void sustain_timer_clb(struct timer_list *t) ++{ ++ struct v4l2_loopback_device *dev = from_timer(dev, t, sustain_timer); ++#else ++static void sustain_timer_clb(unsigned long nr) ++{ ++ struct v4l2_loopback_device *dev = ++ idr_find(&v4l2loopback_index_idr, nr); +#endif -+#ifdef V4L2_PIX_FMT_RGB332 -+ { -+ .name = "8 bpp RGB-3-3-2", -+ .fourcc = V4L2_PIX_FMT_RGB332, -+ .depth = 8, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB332 */ -+#ifdef V4L2_PIX_FMT_RGB444 -+ { -+ .name = "16 bpp RGB (xxxxrrrr ggggbbbb)", -+ .fourcc = V4L2_PIX_FMT_RGB444, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB444 */ -+#ifdef V4L2_PIX_FMT_RGB555 -+ { -+ .name = "16 bpp RGB-5-5-5", -+ .fourcc = V4L2_PIX_FMT_RGB555, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB555 */ -+#ifdef V4L2_PIX_FMT_RGB565 -+ { -+ .name = "16 bpp RGB-5-6-5", -+ .fourcc = V4L2_PIX_FMT_RGB565, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB565 */ -+#ifdef V4L2_PIX_FMT_RGB555X -+ { -+ .name = "16 bpp RGB-5-5-5 BE", -+ .fourcc = V4L2_PIX_FMT_RGB555X, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB555X */ -+#ifdef V4L2_PIX_FMT_RGB565X -+ { -+ .name = "16 bpp RGB-5-6-5 BE", -+ .fourcc = V4L2_PIX_FMT_RGB565X, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_RGB565X */ -+#ifdef V4L2_PIX_FMT_BGR666 -+ { -+ .name = "18 bpp BGR-6-6-6", -+ .fourcc = V4L2_PIX_FMT_BGR666, -+ .depth = 18, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_BGR666 */ -+ { -+ .name = "4:2:2, packed, YUYV", -+ .fourcc = V4L2_PIX_FMT_YUYV, -+ .depth = 16, -+ .flags = 0, -+ }, -+ { -+ .name = "4:2:2, packed, UYVY", -+ .fourcc = V4L2_PIX_FMT_UYVY, -+ .depth = 16, -+ .flags = 0, -+ }, -+#ifdef V4L2_PIX_FMT_YVYU -+ { -+ .name = "4:2:2, packed YVYU", -+ .fourcc = V4L2_PIX_FMT_YVYU, -+ .depth = 16, -+ .flags = 0, -+ }, ++ spin_lock(&dev->lock); ++ if (dev->sustain_framerate) { ++ dev->reread_count++; ++ dprintkrw("reread: %lld %d\n", (long long)dev->write_position, ++ dev->reread_count); ++ if (dev->reread_count == 1) ++ mod_timer(&dev->sustain_timer, ++ jiffies + max(1UL, dev->frame_jiffies / 2)); ++ else ++ mod_timer(&dev->sustain_timer, ++ jiffies + dev->frame_jiffies); ++ wake_up_all(&dev->read_event); ++ } ++ spin_unlock(&dev->lock); ++} ++#ifdef HAVE_TIMER_SETUP ++static void timeout_timer_clb(struct timer_list *t) ++{ ++ struct v4l2_loopback_device *dev = from_timer(dev, t, timeout_timer); ++#else ++static void timeout_timer_clb(unsigned long nr) ++{ ++ struct v4l2_loopback_device *dev = ++ idr_find(&v4l2loopback_index_idr, nr); +#endif -+#ifdef V4L2_PIX_FMT_VYUY -+ { -+ .name = "4:2:2, packed VYUY", -+ .fourcc = V4L2_PIX_FMT_VYUY, -+ .depth = 16, -+ .flags = 0, -+ }, ++ spin_lock(&dev->lock); ++ if (dev->timeout_jiffies > 0) { ++ dev->timeout_happened = 1; ++ mod_timer(&dev->timeout_timer, jiffies + dev->timeout_jiffies); ++ wake_up_all(&dev->read_event); ++ } ++ spin_unlock(&dev->lock); ++} ++ ++/* init loopback main structure */ ++#define DEFAULT_FROM_CONF(confmember, default_condition, default_value) \ ++ ((conf) ? \ ++ ((conf->confmember default_condition) ? 
(default_value) : \ ++ (conf->confmember)) : \ ++ default_value) ++ ++static int v4l2_loopback_add(struct v4l2_loopback_config *conf, int *ret_nr) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_ctrl_handler *hdl; ++ struct v4l2loopback_private *vdev_priv = NULL; ++ ++ int err = -ENOMEM; ++ ++ u32 _width = V4L2LOOPBACK_SIZE_DEFAULT_WIDTH; ++ u32 _height = V4L2LOOPBACK_SIZE_DEFAULT_HEIGHT; ++ ++ u32 _min_width = DEFAULT_FROM_CONF(min_width, ++ < V4L2LOOPBACK_SIZE_MIN_WIDTH, ++ V4L2LOOPBACK_SIZE_MIN_WIDTH); ++ u32 _min_height = DEFAULT_FROM_CONF(min_height, ++ < V4L2LOOPBACK_SIZE_MIN_HEIGHT, ++ V4L2LOOPBACK_SIZE_MIN_HEIGHT); ++ u32 _max_width = DEFAULT_FROM_CONF(max_width, < _min_width, max_width); ++ u32 _max_height = ++ DEFAULT_FROM_CONF(max_height, < _min_height, max_height); ++ bool _announce_all_caps = (conf && conf->announce_all_caps >= 0) ? ++ (conf->announce_all_caps) : ++ V4L2LOOPBACK_DEFAULT_EXCLUSIVECAPS; ++ int _max_buffers = DEFAULT_FROM_CONF(max_buffers, <= 0, max_buffers); ++ int _max_openers = DEFAULT_FROM_CONF(max_openers, <= 0, max_openers); ++ ++ int nr = -1; ++ ++ _announce_all_caps = (!!_announce_all_caps); ++ ++ if (conf) { ++ const int output_nr = conf->output_nr; ++#ifdef SPLIT_DEVICES ++ const int capture_nr = conf->capture_nr; ++#else ++ const int capture_nr = output_nr; +#endif -+ { -+ .name = "4:2:2, packed YYUV", -+ .fourcc = V4L2_PIX_FMT_YYUV, -+ .depth = 16, -+ .flags = 0, -+ }, -+ { -+ .name = "YUV-8-8-8-8", -+ .fourcc = V4L2_PIX_FMT_YUV32, -+ .depth = 32, -+ .flags = 0, -+ }, -+ { -+ .name = "8 bpp, Greyscale", -+ .fourcc = V4L2_PIX_FMT_GREY, -+ .depth = 8, -+ .flags = 0, -+ }, -+#ifdef V4L2_PIX_FMT_Y4 -+ { -+ .name = "4 bpp Greyscale", -+ .fourcc = V4L2_PIX_FMT_Y4, -+ .depth = 4, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_Y4 */ -+#ifdef V4L2_PIX_FMT_Y6 -+ { -+ .name = "6 bpp Greyscale", -+ .fourcc = V4L2_PIX_FMT_Y6, -+ .depth = 6, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_Y6 */ -+#ifdef V4L2_PIX_FMT_Y10 -+ { -+ .name = "10 bpp Greyscale", -+ .fourcc = V4L2_PIX_FMT_Y10, -+ .depth = 10, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_Y10 */ -+#ifdef V4L2_PIX_FMT_Y12 -+ { -+ .name = "12 bpp Greyscale", -+ .fourcc = V4L2_PIX_FMT_Y12, -+ .depth = 12, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_Y12 */ -+ { -+ .name = "16 bpp, Greyscale", -+ .fourcc = V4L2_PIX_FMT_Y16, -+ .depth = 16, -+ .flags = 0, -+ }, -+#ifdef V4L2_PIX_FMT_YUV444 -+ { -+ .name = "16 bpp xxxxyyyy uuuuvvvv", -+ .fourcc = V4L2_PIX_FMT_YUV444, -+ .depth = 16, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_YUV444 */ ++ if (capture_nr >= 0 && output_nr == capture_nr) { ++ nr = output_nr; ++ } else if (capture_nr < 0 && output_nr < 0) { ++ nr = -1; ++ } else if (capture_nr < 0) { ++ nr = output_nr; ++ } else if (output_nr < 0) { ++ nr = capture_nr; ++ } else { ++ printk(KERN_ERR ++ "split OUTPUT and CAPTURE devices not yet supported."); ++ printk(KERN_INFO ++ "both devices must have the same number (%d != %d).", ++ output_nr, capture_nr); ++ return -EINVAL; ++ } ++ } ++ ++ if (idr_find(&v4l2loopback_index_idr, nr)) ++ return -EEXIST; ++ ++ dprintk("creating v4l2loopback-device #%d\n", nr); ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ ++ /* allocate id, if @id >= 0, we're requesting that specific id */ ++ if (nr >= 0) { ++ err = idr_alloc(&v4l2loopback_index_idr, dev, nr, nr + 1, ++ GFP_KERNEL); ++ if (err == -ENOSPC) ++ err = -EEXIST; ++ } else { ++ err = idr_alloc(&v4l2loopback_index_idr, dev, 0, 0, GFP_KERNEL); ++ } ++ if (err < 0) ++ goto 
out_free_dev; ++ nr = err; ++ err = -ENOMEM; ++ ++ if (conf && conf->card_label[0]) { ++ snprintf(dev->card_label, sizeof(dev->card_label), "%s", ++ conf->card_label); ++ } else { ++ snprintf(dev->card_label, sizeof(dev->card_label), ++ "Dummy video device (0x%04X)", nr); ++ } ++ snprintf(dev->v4l2_dev.name, sizeof(dev->v4l2_dev.name), ++ "v4l2loopback-%03d", nr); ++ ++ err = v4l2_device_register(NULL, &dev->v4l2_dev); ++ if (err) ++ goto out_free_idr; ++ MARK(); ++ ++ dev->vdev = video_device_alloc(); ++ if (dev->vdev == NULL) { ++ err = -ENOMEM; ++ goto out_unregister; ++ } ++ ++ vdev_priv = kzalloc(sizeof(struct v4l2loopback_private), GFP_KERNEL); ++ if (vdev_priv == NULL) { ++ err = -ENOMEM; ++ goto out_unregister; ++ } ++ ++ video_set_drvdata(dev->vdev, vdev_priv); ++ if (video_get_drvdata(dev->vdev) == NULL) { ++ err = -ENOMEM; ++ goto out_unregister; ++ } ++ ++ MARK(); ++ snprintf(dev->vdev->name, sizeof(dev->vdev->name), "%s", ++ dev->card_label); ++ ++ vdev_priv->device_nr = nr; ++ ++ init_vdev(dev->vdev, nr); ++ dev->vdev->v4l2_dev = &dev->v4l2_dev; ++ init_capture_param(&dev->capture_param); ++ err = set_timeperframe(dev, &dev->capture_param.timeperframe); ++ if (err) ++ goto out_unregister; ++ dev->keep_format = 0; ++ dev->sustain_framerate = 0; ++ ++ dev->announce_all_caps = _announce_all_caps; ++ dev->min_width = _min_width; ++ dev->min_height = _min_height; ++ dev->max_width = _max_width; ++ dev->max_height = _max_height; ++ dev->max_openers = _max_openers; ++ dev->buffers_number = dev->used_buffers = _max_buffers; ++ ++ dev->write_position = 0; ++ ++ MARK(); ++ spin_lock_init(&dev->lock); ++ spin_lock_init(&dev->list_lock); ++ INIT_LIST_HEAD(&dev->outbufs_list); ++ if (list_empty(&dev->outbufs_list)) { ++ int i; ++ ++ for (i = 0; i < dev->used_buffers; ++i) ++ list_add_tail(&dev->buffers[i].list_head, ++ &dev->outbufs_list); ++ } ++ memset(dev->bufpos2index, 0, sizeof(dev->bufpos2index)); ++ atomic_set(&dev->open_count, 0); ++ dev->ready_for_capture = 0; ++ dev->ready_for_output = 1; ++ ++ dev->buffer_size = 0; ++ dev->image = NULL; ++ dev->imagesize = 0; ++#ifdef HAVE_TIMER_SETUP ++ timer_setup(&dev->sustain_timer, sustain_timer_clb, 0); ++ timer_setup(&dev->timeout_timer, timeout_timer_clb, 0); ++#else ++ setup_timer(&dev->sustain_timer, sustain_timer_clb, nr); ++ setup_timer(&dev->timeout_timer, timeout_timer_clb, nr); ++#endif ++ dev->reread_count = 0; ++ dev->timeout_jiffies = 0; ++ dev->timeout_image = NULL; ++ dev->timeout_happened = 0; ++ ++ hdl = &dev->ctrl_handler; ++ err = v4l2_ctrl_handler_init(hdl, 4); ++ if (err) ++ goto out_unregister; ++ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_keepformat, NULL); ++ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_sustainframerate, NULL); ++ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_timeout, NULL); ++ v4l2_ctrl_new_custom(hdl, &v4l2loopback_ctrl_timeoutimageio, NULL); ++ if (hdl->error) { ++ err = hdl->error; ++ goto out_free_handler; ++ } ++ dev->v4l2_dev.ctrl_handler = hdl; ++ ++ err = v4l2_ctrl_handler_setup(hdl); ++ if (err) ++ goto out_free_handler; ++ ++ /* FIXME set buffers to 0 */ ++ ++ /* Set initial format */ ++ if (_width < _min_width) ++ _width = _min_width; ++ if (_width > _max_width) ++ _width = _max_width; ++ if (_height < _min_height) ++ _height = _min_height; ++ if (_height > _max_height) ++ _height = _max_height; ++ ++ dev->pix_format.width = _width; ++ dev->pix_format.height = _height; ++ dev->pix_format.pixelformat = formats[0].fourcc; ++ dev->pix_format.colorspace = ++ V4L2_COLORSPACE_DEFAULT; /* do 
we need to set this ? */ ++ dev->pix_format.field = V4L2_FIELD_NONE; ++ ++ dev->buffer_size = PAGE_ALIGN(dev->pix_format.sizeimage); ++ dprintk("buffer_size = %ld (=%d)\n", dev->buffer_size, ++ dev->pix_format.sizeimage); ++ ++ if (dev->buffer_size && ((err = allocate_buffers(dev)) < 0)) ++ goto out_free_handler; ++ ++ init_waitqueue_head(&dev->read_event); ++ ++ /* register the device -> it creates /dev/video* */ ++ if (video_register_device(dev->vdev, VFL_TYPE_VIDEO, nr) < 0) { ++ printk(KERN_ERR ++ "v4l2loopback: failed video_register_device()\n"); ++ err = -EFAULT; ++ goto out_free_device; ++ } ++ v4l2loopback_create_sysfs(dev->vdev); ++ ++ MARK(); ++ if (ret_nr) ++ *ret_nr = dev->vdev->num; ++ return 0; ++ ++out_free_device: ++ video_device_release(dev->vdev); ++out_free_handler: ++ v4l2_ctrl_handler_free(&dev->ctrl_handler); ++out_unregister: ++ video_set_drvdata(dev->vdev, NULL); ++ if (vdev_priv != NULL) ++ kfree(vdev_priv); ++ v4l2_device_unregister(&dev->v4l2_dev); ++out_free_idr: ++ idr_remove(&v4l2loopback_index_idr, nr); ++out_free_dev: ++ kfree(dev); ++ return err; ++} ++ ++static void v4l2_loopback_remove(struct v4l2_loopback_device *dev) ++{ ++ free_buffers(dev); ++ v4l2loopback_remove_sysfs(dev->vdev); ++ kfree(video_get_drvdata(dev->vdev)); ++ video_unregister_device(dev->vdev); ++ v4l2_device_unregister(&dev->v4l2_dev); ++ v4l2_ctrl_handler_free(&dev->ctrl_handler); ++ kfree(dev); ++} ++ ++static long v4l2loopback_control_ioctl(struct file *file, unsigned int cmd, ++ unsigned long parm) ++{ ++ struct v4l2_loopback_device *dev; ++ struct v4l2_loopback_config conf; ++ struct v4l2_loopback_config *confptr = &conf; ++ int device_nr, capture_nr, output_nr; ++ int ret; ++ ++ ret = mutex_lock_killable(&v4l2loopback_ctl_mutex); ++ if (ret) ++ return ret; ++ ++ ret = -EINVAL; ++ switch (cmd) { ++ default: ++ ret = -ENOSYS; ++ break; ++ /* add a v4l2loopback device (pair), based on the user-provided specs */ ++ case V4L2LOOPBACK_CTL_ADD: ++ if (parm) { ++ if ((ret = copy_from_user(&conf, (void *)parm, ++ sizeof(conf))) < 0) ++ break; ++ } else ++ confptr = NULL; ++ ret = v4l2_loopback_add(confptr, &device_nr); ++ if (ret >= 0) ++ ret = device_nr; ++ break; ++ /* remove a v4l2loopback device (both capture and output) */ ++ case V4L2LOOPBACK_CTL_REMOVE: ++ ret = v4l2loopback_lookup((int)parm, &dev); ++ if (ret >= 0 && dev) { ++ int nr = ret; ++ ret = -EBUSY; ++ if (dev->open_count.counter > 0) ++ break; ++ idr_remove(&v4l2loopback_index_idr, nr); ++ v4l2_loopback_remove(dev); ++ ret = 0; ++ }; ++ break; ++ /* get information for a loopback device. ++ * this is mostly about limits (which cannot be queried directly with VIDIOC_G_FMT and friends ++ */ ++ case V4L2LOOPBACK_CTL_QUERY: ++ if (!parm) ++ break; ++ if ((ret = copy_from_user(&conf, (void *)parm, sizeof(conf))) < ++ 0) ++ break; ++ capture_nr = output_nr = conf.output_nr; ++#ifdef SPLIT_DEVICES ++ capture_nr = conf.capture_nr; ++#endif ++ device_nr = (output_nr < 0) ? 
capture_nr : output_nr; ++ MARK(); ++ /* get the device from either capture_nr or output_nr (whatever is valid) */ ++ if ((ret = v4l2loopback_lookup(device_nr, &dev)) < 0) ++ break; ++ MARK(); ++ /* if we got the device from output_nr and there is a valid capture_nr, ++ * make sure that both refer to the same device (or bail out) ++ */ ++ if ((device_nr != capture_nr) && (capture_nr >= 0) && ++ ((ret = v4l2loopback_lookup(capture_nr, 0)) < 0)) ++ break; ++ MARK(); ++ /* if otoh, we got the device from capture_nr and there is a valid output_nr, ++ * make sure that both refer to the same device (or bail out) ++ */ ++ if ((device_nr != output_nr) && (output_nr >= 0) && ++ ((ret = v4l2loopback_lookup(output_nr, 0)) < 0)) ++ break; ++ MARK(); ++ ++ /* v4l2_loopback_config identified a single device, so fetch the data */ ++ snprintf(conf.card_label, sizeof(conf.card_label), "%s", ++ dev->card_label); ++ MARK(); ++ conf.output_nr = dev->vdev->num; ++#ifdef SPLIT_DEVICES ++ conf.capture_nr = dev->vdev->num; ++#endif ++ conf.min_width = dev->min_width; ++ conf.min_height = dev->min_height; ++ conf.max_width = dev->max_width; ++ conf.max_height = dev->max_height; ++ conf.announce_all_caps = dev->announce_all_caps; ++ conf.max_buffers = dev->buffers_number; ++ conf.max_openers = dev->max_openers; ++ conf.debug = debug; ++ MARK(); ++ if (copy_to_user((void *)parm, &conf, sizeof(conf))) { ++ ret = -EFAULT; ++ break; ++ } ++ MARK(); ++ ret = 0; ++ ; ++ break; ++ } ++ ++ MARK(); ++ mutex_unlock(&v4l2loopback_ctl_mutex); ++ MARK(); ++ return ret; ++} ++ ++/* LINUX KERNEL */ ++ ++static const struct file_operations v4l2loopback_ctl_fops = { ++ // clang-format off ++ .owner = THIS_MODULE, ++ .open = nonseekable_open, ++ .unlocked_ioctl = v4l2loopback_control_ioctl, ++ .compat_ioctl = v4l2loopback_control_ioctl, ++ .llseek = noop_llseek, ++ // clang-format on ++}; ++ ++static struct miscdevice v4l2loopback_misc = { ++ // clang-format off ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "v4l2loopback", ++ .fops = &v4l2loopback_ctl_fops, ++ // clang-format on ++}; ++ ++static const struct v4l2_file_operations v4l2_loopback_fops = { ++ // clang-format off ++ .owner = THIS_MODULE, ++ .open = v4l2_loopback_open, ++ .release = v4l2_loopback_close, ++ .read = v4l2_loopback_read, ++ .write = v4l2_loopback_write, ++ .poll = v4l2_loopback_poll, ++ .mmap = v4l2_loopback_mmap, ++ .unlocked_ioctl = video_ioctl2, ++ // clang-format on ++}; ++ ++static const struct v4l2_ioctl_ops v4l2_loopback_ioctl_ops = { ++ // clang-format off ++ .vidioc_querycap = &vidioc_querycap, ++ .vidioc_enum_framesizes = &vidioc_enum_framesizes, ++ .vidioc_enum_frameintervals = &vidioc_enum_frameintervals, ++ ++ .vidioc_enum_output = &vidioc_enum_output, ++ .vidioc_g_output = &vidioc_g_output, ++ .vidioc_s_output = &vidioc_s_output, ++ ++ .vidioc_enum_input = &vidioc_enum_input, ++ .vidioc_g_input = &vidioc_g_input, ++ .vidioc_s_input = &vidioc_s_input, ++ ++ .vidioc_enum_fmt_vid_cap = &vidioc_enum_fmt_cap, ++ .vidioc_g_fmt_vid_cap = &vidioc_g_fmt_cap, ++ .vidioc_s_fmt_vid_cap = &vidioc_s_fmt_cap, ++ .vidioc_try_fmt_vid_cap = &vidioc_try_fmt_cap, ++ ++ .vidioc_enum_fmt_vid_out = &vidioc_enum_fmt_out, ++ .vidioc_s_fmt_vid_out = &vidioc_s_fmt_out, ++ .vidioc_g_fmt_vid_out = &vidioc_g_fmt_out, ++ .vidioc_try_fmt_vid_out = &vidioc_try_fmt_out, ++ ++#ifdef V4L2L_OVERLAY ++ .vidioc_s_fmt_vid_overlay = &vidioc_s_fmt_overlay, ++ .vidioc_g_fmt_vid_overlay = &vidioc_g_fmt_overlay, ++#endif ++ ++#ifdef V4L2LOOPBACK_WITH_STD ++ .vidioc_s_std = &vidioc_s_std, ++ 
.vidioc_g_std = &vidioc_g_std, ++ .vidioc_querystd = &vidioc_querystd, ++#endif /* V4L2LOOPBACK_WITH_STD */ ++ ++ .vidioc_g_parm = &vidioc_g_parm, ++ .vidioc_s_parm = &vidioc_s_parm, ++ ++ .vidioc_reqbufs = &vidioc_reqbufs, ++ .vidioc_querybuf = &vidioc_querybuf, ++ .vidioc_qbuf = &vidioc_qbuf, ++ .vidioc_dqbuf = &vidioc_dqbuf, ++ ++ .vidioc_streamon = &vidioc_streamon, ++ .vidioc_streamoff = &vidioc_streamoff, ++ ++#ifdef CONFIG_VIDEO_V4L1_COMPAT ++ .vidiocgmbuf = &vidiocgmbuf, ++#endif ++ ++ .vidioc_subscribe_event = &vidioc_subscribe_event, ++ .vidioc_unsubscribe_event = &v4l2_event_unsubscribe, ++ // clang-format on ++}; ++ ++static int free_device_cb(int id, void *ptr, void *data) ++{ ++ struct v4l2_loopback_device *dev = ptr; ++ v4l2_loopback_remove(dev); ++ return 0; ++} ++static void free_devices(void) ++{ ++ idr_for_each(&v4l2loopback_index_idr, &free_device_cb, NULL); ++ idr_destroy(&v4l2loopback_index_idr); ++} ++ ++static int __init v4l2loopback_init_module(void) ++{ ++ const u32 min_width = V4L2LOOPBACK_SIZE_MIN_WIDTH; ++ const u32 min_height = V4L2LOOPBACK_SIZE_MIN_HEIGHT; ++ int err; ++ int i; ++ MARK(); ++ ++ err = misc_register(&v4l2loopback_misc); ++ if (err < 0) ++ return err; ++ ++ if (devices < 0) { ++ devices = 1; ++ ++ /* try guessing the devices from the "video_nr" parameter */ ++ for (i = MAX_DEVICES - 1; i >= 0; i--) { ++ if (video_nr[i] >= 0) { ++ devices = i + 1; ++ break; ++ } ++ } ++ } ++ ++ if (devices > MAX_DEVICES) { ++ devices = MAX_DEVICES; ++ printk(KERN_INFO ++ "v4l2loopback: number of initial devices is limited to: %d\n", ++ MAX_DEVICES); ++ } ++ ++ if (max_buffers > MAX_BUFFERS) { ++ max_buffers = MAX_BUFFERS; ++ printk(KERN_INFO ++ "v4l2loopback: number of buffers is limited to: %d\n", ++ MAX_BUFFERS); ++ } ++ ++ if (max_openers < 0) { ++ printk(KERN_INFO ++ "v4l2loopback: allowing %d openers rather than %d\n", ++ 2, max_openers); ++ max_openers = 2; ++ } ++ ++ if (max_width < min_width) { ++ max_width = V4L2LOOPBACK_SIZE_DEFAULT_MAX_WIDTH; ++ printk(KERN_INFO "v4l2loopback: using max_width %d\n", ++ max_width); ++ } ++ if (max_height < min_height) { ++ max_height = V4L2LOOPBACK_SIZE_DEFAULT_MAX_HEIGHT; ++ printk(KERN_INFO "v4l2loopback: using max_height %d\n", ++ max_height); ++ } ++ ++ for (i = 0; i < devices; i++) { ++ struct v4l2_loopback_config cfg = { ++ // clang-format off ++ .output_nr = video_nr[i], ++#ifdef SPLIT_DEVICES ++ .capture_nr = video_nr[i], ++#endif ++ .min_width = min_width, ++ .min_height = min_height, ++ .max_width = max_width, ++ .max_height = max_height, ++ .announce_all_caps = (!exclusive_caps[i]), ++ .max_buffers = max_buffers, ++ .max_openers = max_openers, ++ .debug = debug, ++ // clang-format on ++ }; ++ cfg.card_label[0] = 0; ++ if (card_label[i]) ++ snprintf(cfg.card_label, sizeof(cfg.card_label), "%s", ++ card_label[i]); ++ err = v4l2_loopback_add(&cfg, 0); ++ if (err) { ++ free_devices(); ++ goto error; ++ } ++ } ++ ++ dprintk("module installed\n"); ++ ++ printk(KERN_INFO "v4l2loopback driver version %d.%d.%d%s loaded\n", ++ // clang-format off ++ (V4L2LOOPBACK_VERSION_CODE >> 16) & 0xff, ++ (V4L2LOOPBACK_VERSION_CODE >> 8) & 0xff, ++ (V4L2LOOPBACK_VERSION_CODE ) & 0xff, ++#ifdef SNAPSHOT_VERSION ++ " (" __stringify(SNAPSHOT_VERSION) ")" ++#else ++ "" ++#endif ++ ); ++ // clang-format on ++ ++ return 0; ++error: ++ misc_deregister(&v4l2loopback_misc); ++ return err; ++} ++ ++static void v4l2loopback_cleanup_module(void) ++{ ++ MARK(); ++ /* unregister the device -> it deletes /dev/video* */ ++ free_devices(); ++ /* 
and get rid of /dev/v4l2loopback */ ++ misc_deregister(&v4l2loopback_misc); ++ dprintk("module removed\n"); ++} ++ ++MODULE_ALIAS_MISCDEV(MISC_DYNAMIC_MINOR); ++ ++module_init(v4l2loopback_init_module); ++module_exit(v4l2loopback_cleanup_module); +diff --git a/drivers/media/v4l2-core/v4l2loopback.h b/drivers/media/v4l2-core/v4l2loopback.h +new file mode 100644 +index 000000000000..1bc7e6b747a4 +--- /dev/null ++++ b/drivers/media/v4l2-core/v4l2loopback.h +@@ -0,0 +1,98 @@ ++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ ++/* ++ * v4l2loopback.h ++ * ++ * Written by IOhannes m zmölnig, 7/1/20. ++ * ++ * Copyright 2020 by IOhannes m zmölnig. Redistribution of this file is ++ * permitted under the GNU General Public License. ++ */ ++#ifndef _V4L2LOOPBACK_H ++#define _V4L2LOOPBACK_H ++ ++#define V4L2LOOPBACK_VERSION_MAJOR 0 ++#define V4L2LOOPBACK_VERSION_MINOR 13 ++#define V4L2LOOPBACK_VERSION_BUGFIX 1 ++ ++/* /dev/v4l2loopback interface */ ++ ++struct v4l2_loopback_config { ++ /** ++ * the device-number (/dev/video) ++ * V4L2LOOPBACK_CTL_ADD: ++ * setting this to a value<0, will allocate an available one ++ * if nr>=0 and the device already exists, the ioctl will EEXIST ++ * if output_nr and capture_nr are the same, only a single device will be created ++ * NOTE: currently split-devices (where output_nr and capture_nr differ) ++ * are not implemented yet. ++ * until then, requesting different device-IDs will result in EINVAL. ++ * ++ * V4L2LOOPBACK_CTL_QUERY: ++ * either both output_nr and capture_nr must refer to the same loopback, ++ * or one (and only one) of them must be -1 ++ * ++ */ ++ int output_nr; ++ int unused; /*capture_nr;*/ ++ ++ /** ++ * a nice name for your device ++ * if (*card_label)==0, an automatic name is assigned ++ */ ++ char card_label[32]; ++ ++ /** ++ * allowed frame size ++ * if too low, default values are used ++ */ ++ unsigned int min_width; ++ unsigned int max_width; ++ unsigned int min_height; ++ unsigned int max_height; ++ ++ /** ++ * number of buffers to allocate for the queue ++ * if set to <=0, default values are used ++ */ ++ int max_buffers; ++ ++ /** ++ * how many consumers are allowed to open this device concurrently ++ * if set to <=0, default values are used ++ */ ++ int max_openers; ++ ++ /** ++ * set the debugging level for this device ++ */ ++ int debug; ++ ++ /** ++ * whether to announce OUTPUT/CAPTURE capabilities exclusively ++ * for this device or not ++ * (!exclusive_caps) ++ * NOTE: this is going to be removed once separate output/capture ++ * devices are implemented ++ */ ++ int announce_all_caps; ++}; ++ ++/* a pointer to a (struct v4l2_loopback_config) that has all values you wish to impose on the ++ * to-be-created device set. ++ * if the ptr is NULL, a new device is created with default values at the driver's discretion. 
++ * ++ * returns the device_nr of the OUTPUT device (which can be used with V4L2LOOPBACK_CTL_QUERY, ++ * to get more information on the device) ++ */ ++#define V4L2LOOPBACK_CTL_ADD 0x4C80 ++ ++/* a pointer to a (struct v4l2_loopback_config) that has output_nr and/or capture_nr set ++ * (the two values must either refer to video-devices associated with the same loopback device ++ * or exactly one of them must be <0 ++ */ ++#define V4L2LOOPBACK_CTL_QUERY 0x4C82 ++ ++/* the device-number (either CAPTURE or OUTPUT) associated with the loopback-device */ ++#define V4L2LOOPBACK_CTL_REMOVE 0x4C81 ++ ++#endif /* _V4L2LOOPBACK_H */ +diff --git a/drivers/media/v4l2-core/v4l2loopback_formats.h b/drivers/media/v4l2-core/v4l2loopback_formats.h +new file mode 100644 +index 000000000000..d855a3796554 +--- /dev/null ++++ b/drivers/media/v4l2-core/v4l2loopback_formats.h +@@ -0,0 +1,445 @@ ++static const struct v4l2l_format formats[] = { ++#ifndef V4L2_PIX_FMT_VP9 ++#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') ++#endif ++#ifndef V4L2_PIX_FMT_HEVC ++#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') ++#endif ++ ++ /* here come the packed formats */ ++ { ++ .name = "32 bpp RGB, le", ++ .fourcc = V4L2_PIX_FMT_BGR32, ++ .depth = 32, ++ .flags = 0, ++ }, ++ { ++ .name = "32 bpp RGB, be", ++ .fourcc = V4L2_PIX_FMT_RGB32, ++ .depth = 32, ++ .flags = 0, ++ }, ++ { ++ .name = "24 bpp RGB, le", ++ .fourcc = V4L2_PIX_FMT_BGR24, ++ .depth = 24, ++ .flags = 0, ++ }, ++ { ++ .name = "24 bpp RGB, be", ++ .fourcc = V4L2_PIX_FMT_RGB24, ++ .depth = 24, ++ .flags = 0, ++ }, ++#ifdef V4L2_PIX_FMT_ABGR32 ++ { ++ .name = "32 bpp RGBA, le", ++ .fourcc = V4L2_PIX_FMT_ABGR32, ++ .depth = 32, ++ .flags = 0, ++ }, ++#endif ++#ifdef V4L2_PIX_FMT_RGBA32 ++ { ++ .name = "32 bpp RGBA", ++ .fourcc = V4L2_PIX_FMT_RGBA32, ++ .depth = 32, ++ .flags = 0, ++ }, ++#endif ++#ifdef V4L2_PIX_FMT_RGB332 ++ { ++ .name = "8 bpp RGB-3-3-2", ++ .fourcc = V4L2_PIX_FMT_RGB332, ++ .depth = 8, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB332 */ ++#ifdef V4L2_PIX_FMT_RGB444 ++ { ++ .name = "16 bpp RGB (xxxxrrrr ggggbbbb)", ++ .fourcc = V4L2_PIX_FMT_RGB444, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB444 */ ++#ifdef V4L2_PIX_FMT_RGB555 ++ { ++ .name = "16 bpp RGB-5-5-5", ++ .fourcc = V4L2_PIX_FMT_RGB555, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB555 */ ++#ifdef V4L2_PIX_FMT_RGB565 ++ { ++ .name = "16 bpp RGB-5-6-5", ++ .fourcc = V4L2_PIX_FMT_RGB565, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB565 */ ++#ifdef V4L2_PIX_FMT_RGB555X ++ { ++ .name = "16 bpp RGB-5-5-5 BE", ++ .fourcc = V4L2_PIX_FMT_RGB555X, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB555X */ ++#ifdef V4L2_PIX_FMT_RGB565X ++ { ++ .name = "16 bpp RGB-5-6-5 BE", ++ .fourcc = V4L2_PIX_FMT_RGB565X, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_RGB565X */ ++#ifdef V4L2_PIX_FMT_BGR666 ++ { ++ .name = "18 bpp BGR-6-6-6", ++ .fourcc = V4L2_PIX_FMT_BGR666, ++ .depth = 18, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_BGR666 */ ++ { ++ .name = "4:2:2, packed, YUYV", ++ .fourcc = V4L2_PIX_FMT_YUYV, ++ .depth = 16, ++ .flags = 0, ++ }, ++ { ++ .name = "4:2:2, packed, UYVY", ++ .fourcc = V4L2_PIX_FMT_UYVY, ++ .depth = 16, ++ .flags = 0, ++ }, ++#ifdef V4L2_PIX_FMT_YVYU ++ { ++ .name = "4:2:2, packed YVYU", ++ .fourcc = V4L2_PIX_FMT_YVYU, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif ++#ifdef V4L2_PIX_FMT_VYUY ++ { ++ .name = "4:2:2, packed VYUY", ++ .fourcc = V4L2_PIX_FMT_VYUY, ++ .depth 
= 16, ++ .flags = 0, ++ }, ++#endif ++ { ++ .name = "4:2:2, packed YYUV", ++ .fourcc = V4L2_PIX_FMT_YYUV, ++ .depth = 16, ++ .flags = 0, ++ }, ++ { ++ .name = "YUV-8-8-8-8", ++ .fourcc = V4L2_PIX_FMT_YUV32, ++ .depth = 32, ++ .flags = 0, ++ }, ++ { ++ .name = "8 bpp, Greyscale", ++ .fourcc = V4L2_PIX_FMT_GREY, ++ .depth = 8, ++ .flags = 0, ++ }, ++#ifdef V4L2_PIX_FMT_Y4 ++ { ++ .name = "4 bpp Greyscale", ++ .fourcc = V4L2_PIX_FMT_Y4, ++ .depth = 4, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_Y4 */ ++#ifdef V4L2_PIX_FMT_Y6 ++ { ++ .name = "6 bpp Greyscale", ++ .fourcc = V4L2_PIX_FMT_Y6, ++ .depth = 6, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_Y6 */ ++#ifdef V4L2_PIX_FMT_Y10 ++ { ++ .name = "10 bpp Greyscale", ++ .fourcc = V4L2_PIX_FMT_Y10, ++ .depth = 10, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_Y10 */ ++#ifdef V4L2_PIX_FMT_Y12 ++ { ++ .name = "12 bpp Greyscale", ++ .fourcc = V4L2_PIX_FMT_Y12, ++ .depth = 12, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_Y12 */ ++ { ++ .name = "16 bpp, Greyscale", ++ .fourcc = V4L2_PIX_FMT_Y16, ++ .depth = 16, ++ .flags = 0, ++ }, ++#ifdef V4L2_PIX_FMT_YUV444 ++ { ++ .name = "16 bpp xxxxyyyy uuuuvvvv", ++ .fourcc = V4L2_PIX_FMT_YUV444, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_YUV444 */ +#ifdef V4L2_PIX_FMT_YUV555 + { + .name = "16 bpp YUV-5-5-5", @@ -12390,5427 +17766,11342 @@ index 000000000000..d855a3796554 +#endif /* V4L2_PIX_FMT_YUV555 */ +#ifdef V4L2_PIX_FMT_YUV565 + { -+ .name = "16 bpp YUV-5-6-5", -+ .fourcc = V4L2_PIX_FMT_YUV565, -+ .depth = 16, -+ .flags = 0, ++ .name = "16 bpp YUV-5-6-5", ++ .fourcc = V4L2_PIX_FMT_YUV565, ++ .depth = 16, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_YUV565 */ ++ ++/* bayer formats */ ++#ifdef V4L2_PIX_FMT_SRGGB8 ++ { ++ .name = "Bayer RGGB 8bit", ++ .fourcc = V4L2_PIX_FMT_SRGGB8, ++ .depth = 8, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_SRGGB8 */ ++#ifdef V4L2_PIX_FMT_SGRBG8 ++ { ++ .name = "Bayer GRBG 8bit", ++ .fourcc = V4L2_PIX_FMT_SGRBG8, ++ .depth = 8, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_SGRBG8 */ ++#ifdef V4L2_PIX_FMT_SGBRG8 ++ { ++ .name = "Bayer GBRG 8bit", ++ .fourcc = V4L2_PIX_FMT_SGBRG8, ++ .depth = 8, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_SGBRG8 */ ++#ifdef V4L2_PIX_FMT_SBGGR8 ++ { ++ .name = "Bayer BA81 8bit", ++ .fourcc = V4L2_PIX_FMT_SBGGR8, ++ .depth = 8, ++ .flags = 0, ++ }, ++#endif /* V4L2_PIX_FMT_SBGGR8 */ ++ ++ /* here come the planar formats */ ++ { ++ .name = "4:1:0, planar, Y-Cr-Cb", ++ .fourcc = V4L2_PIX_FMT_YVU410, ++ .depth = 9, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++ { ++ .name = "4:2:0, planar, Y-Cr-Cb", ++ .fourcc = V4L2_PIX_FMT_YVU420, ++ .depth = 12, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++ { ++ .name = "4:1:0, planar, Y-Cb-Cr", ++ .fourcc = V4L2_PIX_FMT_YUV410, ++ .depth = 9, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++ { ++ .name = "4:2:0, planar, Y-Cb-Cr", ++ .fourcc = V4L2_PIX_FMT_YUV420, ++ .depth = 12, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++#ifdef V4L2_PIX_FMT_YUV422P ++ { ++ .name = "16 bpp YVU422 planar", ++ .fourcc = V4L2_PIX_FMT_YUV422P, ++ .depth = 16, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++#endif /* V4L2_PIX_FMT_YUV422P */ ++#ifdef V4L2_PIX_FMT_YUV411P ++ { ++ .name = "16 bpp YVU411 planar", ++ .fourcc = V4L2_PIX_FMT_YUV411P, ++ .depth = 16, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++#endif /* V4L2_PIX_FMT_YUV411P */ ++#ifdef V4L2_PIX_FMT_Y41P ++ { ++ .name = "12 bpp YUV 4:1:1", ++ .fourcc = V4L2_PIX_FMT_Y41P, ++ .depth = 12, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++#endif /* V4L2_PIX_FMT_Y41P */ ++#ifdef 
V4L2_PIX_FMT_NV12 ++ { ++ .name = "12 bpp Y/CbCr 4:2:0 ", ++ .fourcc = V4L2_PIX_FMT_NV12, ++ .depth = 12, ++ .flags = FORMAT_FLAGS_PLANAR, ++ }, ++#endif /* V4L2_PIX_FMT_NV12 */ ++ ++/* here come the compressed formats */ ++ ++#ifdef V4L2_PIX_FMT_MJPEG ++ { ++ .name = "Motion-JPEG", ++ .fourcc = V4L2_PIX_FMT_MJPEG, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_MJPEG */ ++#ifdef V4L2_PIX_FMT_JPEG ++ { ++ .name = "JFIF JPEG", ++ .fourcc = V4L2_PIX_FMT_JPEG, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_JPEG */ ++#ifdef V4L2_PIX_FMT_DV ++ { ++ .name = "DV1394", ++ .fourcc = V4L2_PIX_FMT_DV, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_DV */ ++#ifdef V4L2_PIX_FMT_MPEG ++ { ++ .name = "MPEG-1/2/4 Multiplexed", ++ .fourcc = V4L2_PIX_FMT_MPEG, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_MPEG */ ++#ifdef V4L2_PIX_FMT_H264 ++ { ++ .name = "H264 with start codes", ++ .fourcc = V4L2_PIX_FMT_H264, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_H264 */ ++#ifdef V4L2_PIX_FMT_H264_NO_SC ++ { ++ .name = "H264 without start codes", ++ .fourcc = V4L2_PIX_FMT_H264_NO_SC, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_H264_NO_SC */ ++#ifdef V4L2_PIX_FMT_H264_MVC ++ { ++ .name = "H264 MVC", ++ .fourcc = V4L2_PIX_FMT_H264_MVC, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_H264_MVC */ ++#ifdef V4L2_PIX_FMT_H263 ++ { ++ .name = "H263", ++ .fourcc = V4L2_PIX_FMT_H263, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_H263 */ ++#ifdef V4L2_PIX_FMT_MPEG1 ++ { ++ .name = "MPEG-1 ES", ++ .fourcc = V4L2_PIX_FMT_MPEG1, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_MPEG1 */ ++#ifdef V4L2_PIX_FMT_MPEG2 ++ { ++ .name = "MPEG-2 ES", ++ .fourcc = V4L2_PIX_FMT_MPEG2, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_MPEG2 */ ++#ifdef V4L2_PIX_FMT_MPEG4 ++ { ++ .name = "MPEG-4 part 2 ES", ++ .fourcc = V4L2_PIX_FMT_MPEG4, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_MPEG4 */ ++#ifdef V4L2_PIX_FMT_XVID ++ { ++ .name = "Xvid", ++ .fourcc = V4L2_PIX_FMT_XVID, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_XVID */ ++#ifdef V4L2_PIX_FMT_VC1_ANNEX_G ++ { ++ .name = "SMPTE 421M Annex G compliant stream", ++ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_VC1_ANNEX_G */ ++#ifdef V4L2_PIX_FMT_VC1_ANNEX_L ++ { ++ .name = "SMPTE 421M Annex L compliant stream", ++ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_VC1_ANNEX_L */ ++#ifdef V4L2_PIX_FMT_VP8 ++ { ++ .name = "VP8", ++ .fourcc = V4L2_PIX_FMT_VP8, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_VP8 */ ++#ifdef V4L2_PIX_FMT_VP9 ++ { ++ .name = "VP9", ++ .fourcc = V4L2_PIX_FMT_VP9, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_VP9 */ ++#ifdef V4L2_PIX_FMT_HEVC ++ { ++ .name = "HEVC", ++ .fourcc = V4L2_PIX_FMT_HEVC, ++ .depth = 32, ++ .flags = FORMAT_FLAGS_COMPRESSED, ++ }, ++#endif /* V4L2_PIX_FMT_HEVC */ ++}; +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig +index ae23b317a64e..7417c480275a 100644 +--- 
a/drivers/mfd/Kconfig ++++ b/drivers/mfd/Kconfig +@@ -2414,5 +2414,16 @@ config MFD_RSMU_SPI + Additional drivers must be enabled in order to use the functionality + of the device. + ++config MFD_STEAMDECK ++ tristate "Valve Steam Deck" ++ select MFD_CORE ++ depends on ACPI ++ depends on X86_64 || COMPILE_TEST ++ help ++ This driver registers various MFD cells that expose aspects ++ of Steam Deck specific ACPI functionality. ++ ++ Say N here, unless you are running on Steam Deck hardware. ++ + endmenu + endif +diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile +index e057d6d6faef..2f9f8a0daf5e 100644 +--- a/drivers/mfd/Makefile ++++ b/drivers/mfd/Makefile +@@ -290,3 +290,5 @@ obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o + + obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o + obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o ++ ++obj-$(CONFIG_MFD_STEAMDECK) += steamdeck.o +diff --git a/drivers/mfd/steamdeck.c b/drivers/mfd/steamdeck.c +new file mode 100644 +index 000000000000..a60fa7db9141 +--- /dev/null ++++ b/drivers/mfd/steamdeck.c +@@ -0,0 +1,147 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++ ++/* ++ * Steam Deck EC MFD core driver ++ * ++ * Copyright (C) 2021-2022 Valve Corporation ++ * ++ */ ++ ++#include ++#include ++#include ++ ++#define STEAMDECK_STA_OK \ ++ (ACPI_STA_DEVICE_ENABLED | \ ++ ACPI_STA_DEVICE_PRESENT | \ ++ ACPI_STA_DEVICE_FUNCTIONING) ++ ++struct steamdeck { ++ struct acpi_device *adev; ++ struct device *dev; ++}; ++ ++#define STEAMDECK_ATTR_RO(_name, _method) \ ++ static ssize_t _name##_show(struct device *dev, \ ++ struct device_attribute *attr, \ ++ char *buf) \ ++ { \ ++ struct steamdeck *sd = dev_get_drvdata(dev); \ ++ unsigned long long val; \ ++ \ ++ if (ACPI_FAILURE(acpi_evaluate_integer( \ ++ sd->adev->handle, \ ++ _method, NULL, &val))) \ ++ return -EIO; \ ++ \ ++ return sysfs_emit(buf, "%llu\n", val); \ ++ } \ ++ static DEVICE_ATTR_RO(_name) ++ ++STEAMDECK_ATTR_RO(firmware_version, "PDFW"); ++STEAMDECK_ATTR_RO(board_id, "BOID"); ++ ++static ssize_t controller_board_power_store(struct device *dev, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct steamdeck *sd = dev_get_drvdata(dev); ++ bool enabled; ++ ssize_t ret = kstrtobool(buf, &enabled); ++ ++ if (ret) ++ return ret; ++ ++ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, ++ "SCBP", enabled))) ++ return -EIO; ++ ++ return count; ++} ++static DEVICE_ATTR_WO(controller_board_power); ++ ++static struct attribute *steamdeck_attrs[] = { ++ &dev_attr_firmware_version.attr, ++ &dev_attr_board_id.attr, ++ &dev_attr_controller_board_power.attr, ++ NULL ++}; ++ ++ATTRIBUTE_GROUPS(steamdeck); ++ ++static const struct mfd_cell steamdeck_cells[] = { ++ { .name = "steamdeck-hwmon" }, ++ { .name = "steamdeck-leds" }, ++ { .name = "steamdeck-extcon" }, ++}; ++ ++static void steamdeck_remove_sysfs_groups(void *data) ++{ ++ struct steamdeck *sd = data; ++ ++ sysfs_remove_groups(&sd->dev->kobj, steamdeck_groups); ++} ++ ++static int steamdeck_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ unsigned long long sta; ++ struct steamdeck *sd; ++ acpi_status status; ++ int ret; ++ ++ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); ++ if (!sd) ++ return -ENOMEM; ++ sd->adev = ACPI_COMPANION(dev); ++ sd->dev = dev; ++ platform_set_drvdata(pdev, sd); ++ ++ status = acpi_evaluate_integer(sd->adev->handle, "_STA", ++ NULL, &sta); ++ if (ACPI_FAILURE(status)) { ++ dev_err(dev, "Status check failed (0x%x)\n", status); ++ return -EINVAL; ++ } ++ ++ 
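++	/*
++	 * _STA returns the standard ACPI status word; STEAMDECK_STA_OK
++	 * (defined above) requires the EC device to be present, enabled
++	 * and functioning before any MFD cells are registered, so probe
++	 * bails out below when firmware reports anything less.
++	 */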
++	if ((sta & STEAMDECK_STA_OK) != STEAMDECK_STA_OK) {
++		dev_err(dev, "Device is not ready\n");
++		return -EINVAL;
++	}
++
++	ret = sysfs_create_groups(&dev->kobj, steamdeck_groups);
++	if (ret) {
++		dev_err(dev, "Failed to create sysfs group\n");
++		return ret;
++	}
++
++	ret = devm_add_action_or_reset(dev, steamdeck_remove_sysfs_groups,
++				       sd);
++	if (ret) {
++		dev_err(dev, "Failed to register devres action\n");
++		return ret;
++	}
++
++	return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE,
++			steamdeck_cells, ARRAY_SIZE(steamdeck_cells),
++			NULL, 0, NULL);
++}
++
++static const struct acpi_device_id steamdeck_device_ids[] = {
++	{ "VLV0100", 0 },
++	{ "", 0 },
++};
++MODULE_DEVICE_TABLE(acpi, steamdeck_device_ids);
++
++static struct platform_driver steamdeck_driver = {
++	.probe = steamdeck_probe,
++	.driver = {
++		.name = "steamdeck",
++		.acpi_match_table = steamdeck_device_ids,
++	},
++};
++module_platform_driver(steamdeck_driver);
++
++MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
++MODULE_DESCRIPTION("Steam Deck EC MFD core driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 09cbe3f0ab1e..fb772bfe27c3 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -517,7 +517,6 @@ config OPEN_DICE
+
+ config NTSYNC
+ 	tristate "NT synchronization primitive emulation"
+-	depends on BROKEN
+ 	help
+ 	  This module provides kernel support for emulation of Windows NT
+ 	  synchronization primitives. It is not a hardware driver.
+diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
+index 4954553b7baa..457ff28b789f 100644
+--- a/drivers/misc/ntsync.c
++++ b/drivers/misc/ntsync.c
+@@ -6,11 +6,17 @@
+  */
+
+ #include <linux/anon_inodes.h>
++#include <linux/atomic.h>
+ #include <linux/file.h>
+ #include <linux/fs.h>
++#include <linux/hrtimer.h>
++#include <linux/ktime.h>
+ #include <linux/miscdevice.h>
+ #include <linux/module.h>
++#include <linux/mutex.h>
+ #include <linux/overflow.h>
++#include <linux/sched.h>
++#include <linux/sched/signal.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/uaccess.h>
+@@ -19,6 +25,8 @@
+
+ enum ntsync_type {
+ 	NTSYNC_TYPE_SEM,
++	NTSYNC_TYPE_MUTEX,
++	NTSYNC_TYPE_EVENT,
+ };
+
+ /*
+@@ -30,10 +38,13 @@ enum ntsync_type {
+  *
+  * Both rely on struct file for reference counting. Individual
+  * ntsync_obj objects take a reference to the device when created.
++ * Wait operations take a reference to each object being waited on for
++ * the duration of the wait.
+  */
+
+ struct ntsync_obj {
+ 	spinlock_t lock;
++	int dev_locked;
+
+ 	enum ntsync_type type;
+
+@@ -46,22 +57,344 @@ struct ntsync_obj {
+ 			__u32 count;
+ 			__u32 max;
+ 		} sem;
++		struct {
++			__u32 count;
++			pid_t owner;
++			bool ownerdead;
++		} mutex;
++		struct {
++			bool manual;
++			bool signaled;
++		} event;
+ 	} u;
++
++	/*
++	 * any_waiters is protected by the object lock, but all_waiters is
++	 * protected by the device wait_all_lock.
++	 */
++	struct list_head any_waiters;
++	struct list_head all_waiters;
++
++	/*
++	 * Hint describing how many tasks are queued on this object in a
++	 * wait-all operation.
++	 *
++	 * Any time we do a wake, we may need to wake "all" waiters as well as
++	 * "any" waiters. In order to atomically wake "all" waiters, we must
++	 * lock all of the objects, and that means grabbing the wait_all_lock
++	 * below (and, due to lock ordering rules, before locking this object).
++	 * However, wait-all is a rare operation, and grabbing the wait-all
++	 * lock for every wake would create unnecessary contention.
++	 * Therefore we first check whether all_hint is zero, and, if it is,
++	 * we skip trying to wake "all" waiters.
++	 *
++	 * Since wait requests must originate from user-space threads, we're
++	 * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
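++	 * A stale read of the hint cannot lose a wakeup, either: the
++	 * wait-for-all path re-runs try_wake_all() under wait_all_lock
++	 * after queueing itself, so any state change that happened before
++	 * the hint became visible is re-examined there.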
++ */ ++ atomic_t all_hint; ++}; ++ ++struct ntsync_q_entry { ++ struct list_head node; ++ struct ntsync_q *q; ++ struct ntsync_obj *obj; ++ __u32 index; ++}; ++ ++struct ntsync_q { ++ struct task_struct *task; ++ __u32 owner; ++ ++ /* ++ * Protected via atomic_try_cmpxchg(). Only the thread that wins the ++ * compare-and-swap may actually change object states and wake this ++ * task. ++ */ ++ atomic_t signaled; ++ ++ bool all; ++ bool ownerdead; ++ __u32 count; ++ struct ntsync_q_entry entries[]; + }; + + struct ntsync_device { ++ /* ++ * Wait-all operations must atomically grab all objects, and be totally ++ * ordered with respect to each other and wait-any operations. ++ * If one thread is trying to acquire several objects, another thread ++ * cannot touch the object at the same time. ++ * ++ * This device-wide lock is used to serialize wait-for-all ++ * operations, and operations on an object that is involved in a ++ * wait-for-all. ++ */ ++ struct mutex wait_all_lock; ++ + struct file *file; + }; + ++/* ++ * Single objects are locked using obj->lock. ++ * ++ * Multiple objects are 'locked' while holding dev->wait_all_lock. ++ * In this case however, individual objects are not locked by holding ++ * obj->lock, but by setting obj->dev_locked. ++ * ++ * This means that in order to lock a single object, the sequence is slightly ++ * more complicated than usual. Specifically it needs to check obj->dev_locked ++ * after acquiring obj->lock, if set, it needs to drop the lock and acquire ++ * dev->wait_all_lock in order to serialize against the multi-object operation. ++ */ ++ ++static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) ++{ ++ lockdep_assert_held(&dev->wait_all_lock); ++ lockdep_assert(obj->dev == dev); ++ spin_lock(&obj->lock); ++ /* ++ * By setting obj->dev_locked inside obj->lock, it is ensured that ++ * anyone holding obj->lock must see the value. ++ */ ++ obj->dev_locked = 1; ++ spin_unlock(&obj->lock); ++} ++ ++static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) ++{ ++ lockdep_assert_held(&dev->wait_all_lock); ++ lockdep_assert(obj->dev == dev); ++ spin_lock(&obj->lock); ++ obj->dev_locked = 0; ++ spin_unlock(&obj->lock); ++} ++ ++static void obj_lock(struct ntsync_obj *obj) ++{ ++ struct ntsync_device *dev = obj->dev; ++ ++ for (;;) { ++ spin_lock(&obj->lock); ++ if (likely(!obj->dev_locked)) ++ break; ++ ++ spin_unlock(&obj->lock); ++ mutex_lock(&dev->wait_all_lock); ++ spin_lock(&obj->lock); ++ /* ++ * obj->dev_locked should be set and released under the same ++ * wait_all_lock section, since we now own this lock, it should ++ * be clear. 
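++		 * The retry loop is also deadlock-free: it always drops
++		 * obj->lock before sleeping on wait_all_lock, and both
++		 * locking paths acquire in the fixed order
++		 * wait_all_lock, then obj->lock.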
++ */ ++ lockdep_assert(!obj->dev_locked); ++ spin_unlock(&obj->lock); ++ mutex_unlock(&dev->wait_all_lock); ++ } ++} ++ ++static void obj_unlock(struct ntsync_obj *obj) ++{ ++ spin_unlock(&obj->lock); ++} ++ ++static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) ++{ ++ bool all; ++ ++ obj_lock(obj); ++ all = atomic_read(&obj->all_hint); ++ if (unlikely(all)) { ++ obj_unlock(obj); ++ mutex_lock(&dev->wait_all_lock); ++ dev_lock_obj(dev, obj); ++ } ++ ++ return all; ++} ++ ++static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all) ++{ ++ if (all) { ++ dev_unlock_obj(dev, obj); ++ mutex_unlock(&dev->wait_all_lock); ++ } else { ++ obj_unlock(obj); ++ } ++} ++ ++#define ntsync_assert_held(obj) \ ++ lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \ ++ ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \ ++ (obj)->dev_locked)) ++ ++static bool is_signaled(struct ntsync_obj *obj, __u32 owner) ++{ ++ ntsync_assert_held(obj); ++ ++ switch (obj->type) { ++ case NTSYNC_TYPE_SEM: ++ return !!obj->u.sem.count; ++ case NTSYNC_TYPE_MUTEX: ++ if (obj->u.mutex.owner && obj->u.mutex.owner != owner) ++ return false; ++ return obj->u.mutex.count < UINT_MAX; ++ case NTSYNC_TYPE_EVENT: ++ return obj->u.event.signaled; ++ } ++ ++ WARN(1, "bad object type %#x\n", obj->type); ++ return false; ++} ++ ++/* ++ * "locked_obj" is an optional pointer to an object which is already locked and ++ * should not be locked again. This is necessary so that changing an object's ++ * state and waking it can be a single atomic operation. ++ */ ++static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q, ++ struct ntsync_obj *locked_obj) ++{ ++ __u32 count = q->count; ++ bool can_wake = true; ++ int signaled = -1; ++ __u32 i; ++ ++ lockdep_assert_held(&dev->wait_all_lock); ++ if (locked_obj) ++ lockdep_assert(locked_obj->dev_locked); ++ ++ for (i = 0; i < count; i++) { ++ if (q->entries[i].obj != locked_obj) ++ dev_lock_obj(dev, q->entries[i].obj); ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (!is_signaled(q->entries[i].obj, q->owner)) { ++ can_wake = false; ++ break; ++ } ++ } ++ ++ if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) { ++ for (i = 0; i < count; i++) { ++ struct ntsync_obj *obj = q->entries[i].obj; ++ ++ switch (obj->type) { ++ case NTSYNC_TYPE_SEM: ++ obj->u.sem.count--; ++ break; ++ case NTSYNC_TYPE_MUTEX: ++ if (obj->u.mutex.ownerdead) ++ q->ownerdead = true; ++ obj->u.mutex.ownerdead = false; ++ obj->u.mutex.count++; ++ obj->u.mutex.owner = q->owner; ++ break; ++ case NTSYNC_TYPE_EVENT: ++ if (!obj->u.event.manual) ++ obj->u.event.signaled = false; ++ break; ++ } ++ } ++ wake_up_process(q->task); ++ } ++ ++ for (i = 0; i < count; i++) { ++ if (q->entries[i].obj != locked_obj) ++ dev_unlock_obj(dev, q->entries[i].obj); ++ } ++} ++ ++static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj) ++{ ++ struct ntsync_q_entry *entry; ++ ++ lockdep_assert_held(&dev->wait_all_lock); ++ lockdep_assert(obj->dev_locked); ++ ++ list_for_each_entry(entry, &obj->all_waiters, node) ++ try_wake_all(dev, entry->q, obj); ++} ++ ++static void try_wake_any_sem(struct ntsync_obj *sem) ++{ ++ struct ntsync_q_entry *entry; ++ ++ ntsync_assert_held(sem); ++ lockdep_assert(sem->type == NTSYNC_TYPE_SEM); ++ ++ list_for_each_entry(entry, &sem->any_waiters, node) { ++ struct ntsync_q *q = entry->q; ++ int signaled = -1; ++ ++ if (!sem->u.sem.count) ++ break; ++ ++ if 
(atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { ++ sem->u.sem.count--; ++ wake_up_process(q->task); ++ } ++ } ++} ++ ++static void try_wake_any_mutex(struct ntsync_obj *mutex) ++{ ++ struct ntsync_q_entry *entry; ++ ++ ntsync_assert_held(mutex); ++ lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX); ++ ++ list_for_each_entry(entry, &mutex->any_waiters, node) { ++ struct ntsync_q *q = entry->q; ++ int signaled = -1; ++ ++ if (mutex->u.mutex.count == UINT_MAX) ++ break; ++ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner) ++ continue; ++ ++ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { ++ if (mutex->u.mutex.ownerdead) ++ q->ownerdead = true; ++ mutex->u.mutex.ownerdead = false; ++ mutex->u.mutex.count++; ++ mutex->u.mutex.owner = q->owner; ++ wake_up_process(q->task); ++ } ++ } ++} ++ ++static void try_wake_any_event(struct ntsync_obj *event) ++{ ++ struct ntsync_q_entry *entry; ++ ++ ntsync_assert_held(event); ++ lockdep_assert(event->type == NTSYNC_TYPE_EVENT); ++ ++ list_for_each_entry(entry, &event->any_waiters, node) { ++ struct ntsync_q *q = entry->q; ++ int signaled = -1; ++ ++ if (!event->u.event.signaled) ++ break; ++ ++ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { ++ if (!event->u.event.manual) ++ event->u.event.signaled = false; ++ wake_up_process(q->task); ++ } ++ } ++} ++ + /* + * Actually change the semaphore state, returning -EOVERFLOW if it is made + * invalid. + */ +-static int post_sem_state(struct ntsync_obj *sem, __u32 count) ++static int release_sem_state(struct ntsync_obj *sem, __u32 count) + { + __u32 sum; + +- lockdep_assert_held(&sem->lock); ++ ntsync_assert_held(sem); + + if (check_add_overflow(sem->u.sem.count, count, &sum) || + sum > sem->u.sem.max) +@@ -71,11 +404,13 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count) + return 0; + } + +-static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) ++static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp) + { ++ struct ntsync_device *dev = sem->dev; + __u32 __user *user_args = argp; + __u32 prev_count; + __u32 args; ++ bool all; + int ret; + + if (copy_from_user(&args, argp, sizeof(args))) +@@ -84,12 +419,17 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) + if (sem->type != NTSYNC_TYPE_SEM) + return -EINVAL; + +- spin_lock(&sem->lock); ++ all = ntsync_lock_obj(dev, sem); + + prev_count = sem->u.sem.count; +- ret = post_sem_state(sem, args); ++ ret = release_sem_state(sem, args); ++ if (!ret) { ++ if (all) ++ try_wake_all_obj(dev, sem); ++ try_wake_any_sem(sem); ++ } + +- spin_unlock(&sem->lock); ++ ntsync_unlock_obj(dev, sem, all); + + if (!ret && put_user(prev_count, user_args)) + ret = -EFAULT; +@@ -97,6 +437,220 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) + return ret; + } + ++/* ++ * Actually change the mutex state, returning -EPERM if not the owner. 
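++ * Note that "owner" is only the caller-supplied 32-bit identity from
++ * ntsync_mutex_args (userspace typically passes a thread id); the
++ * driver compares cookies and never verifies which task issues the
++ * ioctl.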
++ */ ++static int unlock_mutex_state(struct ntsync_obj *mutex, ++ const struct ntsync_mutex_args *args) ++{ ++ ntsync_assert_held(mutex); ++ ++ if (mutex->u.mutex.owner != args->owner) ++ return -EPERM; ++ ++ if (!--mutex->u.mutex.count) ++ mutex->u.mutex.owner = 0; ++ return 0; ++} ++ ++static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp) ++{ ++ struct ntsync_mutex_args __user *user_args = argp; ++ struct ntsync_device *dev = mutex->dev; ++ struct ntsync_mutex_args args; ++ __u32 prev_count; ++ bool all; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ if (!args.owner) ++ return -EINVAL; ++ ++ if (mutex->type != NTSYNC_TYPE_MUTEX) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, mutex); ++ ++ prev_count = mutex->u.mutex.count; ++ ret = unlock_mutex_state(mutex, &args); ++ if (!ret) { ++ if (all) ++ try_wake_all_obj(dev, mutex); ++ try_wake_any_mutex(mutex); ++ } ++ ++ ntsync_unlock_obj(dev, mutex, all); ++ ++ if (!ret && put_user(prev_count, &user_args->count)) ++ ret = -EFAULT; ++ ++ return ret; ++} ++ ++/* ++ * Actually change the mutex state to mark its owner as dead, ++ * returning -EPERM if not the owner. ++ */ ++static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner) ++{ ++ ntsync_assert_held(mutex); ++ ++ if (mutex->u.mutex.owner != owner) ++ return -EPERM; ++ ++ mutex->u.mutex.ownerdead = true; ++ mutex->u.mutex.owner = 0; ++ mutex->u.mutex.count = 0; ++ return 0; ++} ++ ++static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp) ++{ ++ struct ntsync_device *dev = mutex->dev; ++ __u32 owner; ++ bool all; ++ int ret; ++ ++ if (get_user(owner, (__u32 __user *)argp)) ++ return -EFAULT; ++ if (!owner) ++ return -EINVAL; ++ ++ if (mutex->type != NTSYNC_TYPE_MUTEX) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, mutex); ++ ++ ret = kill_mutex_state(mutex, owner); ++ if (!ret) { ++ if (all) ++ try_wake_all_obj(dev, mutex); ++ try_wake_any_mutex(mutex); ++ } ++ ++ ntsync_unlock_obj(dev, mutex, all); ++ ++ return ret; ++} ++ ++static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse) ++{ ++ struct ntsync_device *dev = event->dev; ++ __u32 prev_state; ++ bool all; ++ ++ if (event->type != NTSYNC_TYPE_EVENT) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, event); ++ ++ prev_state = event->u.event.signaled; ++ event->u.event.signaled = true; ++ if (all) ++ try_wake_all_obj(dev, event); ++ try_wake_any_event(event); ++ if (pulse) ++ event->u.event.signaled = false; ++ ++ ntsync_unlock_obj(dev, event, all); ++ ++ if (put_user(prev_state, (__u32 __user *)argp)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp) ++{ ++ struct ntsync_device *dev = event->dev; ++ __u32 prev_state; ++ bool all; ++ ++ if (event->type != NTSYNC_TYPE_EVENT) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, event); ++ ++ prev_state = event->u.event.signaled; ++ event->u.event.signaled = false; ++ ++ ntsync_unlock_obj(dev, event, all); ++ ++ if (put_user(prev_state, (__u32 __user *)argp)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp) ++{ ++ struct ntsync_sem_args __user *user_args = argp; ++ struct ntsync_device *dev = sem->dev; ++ struct ntsync_sem_args args; ++ bool all; ++ ++ if (sem->type != NTSYNC_TYPE_SEM) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, sem); ++ ++ args.count = sem->u.sem.count; ++ args.max = sem->u.sem.max; ++ ++ 
ntsync_unlock_obj(dev, sem, all); ++ ++ if (copy_to_user(user_args, &args, sizeof(args))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp) ++{ ++ struct ntsync_mutex_args __user *user_args = argp; ++ struct ntsync_device *dev = mutex->dev; ++ struct ntsync_mutex_args args; ++ bool all; ++ int ret; ++ ++ if (mutex->type != NTSYNC_TYPE_MUTEX) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, mutex); ++ ++ args.count = mutex->u.mutex.count; ++ args.owner = mutex->u.mutex.owner; ++ ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0; ++ ++ ntsync_unlock_obj(dev, mutex, all); ++ ++ if (copy_to_user(user_args, &args, sizeof(args))) ++ return -EFAULT; ++ return ret; ++} ++ ++static int ntsync_event_read(struct ntsync_obj *event, void __user *argp) ++{ ++ struct ntsync_event_args __user *user_args = argp; ++ struct ntsync_device *dev = event->dev; ++ struct ntsync_event_args args; ++ bool all; ++ ++ if (event->type != NTSYNC_TYPE_EVENT) ++ return -EINVAL; ++ ++ all = ntsync_lock_obj(dev, event); ++ ++ args.manual = event->u.event.manual; ++ args.signaled = event->u.event.signaled; ++ ++ ntsync_unlock_obj(dev, event, all); ++ ++ if (copy_to_user(user_args, &args, sizeof(args))) ++ return -EFAULT; ++ return 0; ++} ++ + static int ntsync_obj_release(struct inode *inode, struct file *file) + { + struct ntsync_obj *obj = file->private_data; +@@ -114,8 +668,24 @@ static long ntsync_obj_ioctl(struct file *file, unsigned int cmd, + void __user *argp = (void __user *)parm; + + switch (cmd) { +- case NTSYNC_IOC_SEM_POST: +- return ntsync_sem_post(obj, argp); ++ case NTSYNC_IOC_SEM_RELEASE: ++ return ntsync_sem_release(obj, argp); ++ case NTSYNC_IOC_SEM_READ: ++ return ntsync_sem_read(obj, argp); ++ case NTSYNC_IOC_MUTEX_UNLOCK: ++ return ntsync_mutex_unlock(obj, argp); ++ case NTSYNC_IOC_MUTEX_KILL: ++ return ntsync_mutex_kill(obj, argp); ++ case NTSYNC_IOC_MUTEX_READ: ++ return ntsync_mutex_read(obj, argp); ++ case NTSYNC_IOC_EVENT_SET: ++ return ntsync_event_set(obj, argp, false); ++ case NTSYNC_IOC_EVENT_RESET: ++ return ntsync_event_reset(obj, argp); ++ case NTSYNC_IOC_EVENT_PULSE: ++ return ntsync_event_set(obj, argp, true); ++ case NTSYNC_IOC_EVENT_READ: ++ return ntsync_event_read(obj, argp); + default: + return -ENOIOCTLCMD; + } +@@ -140,6 +710,9 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev, + obj->dev = dev; + get_file(dev->file); + spin_lock_init(&obj->lock); ++ INIT_LIST_HEAD(&obj->any_waiters); ++ INIT_LIST_HEAD(&obj->all_waiters); ++ atomic_set(&obj->all_hint, 0); + + return obj; + } +@@ -165,7 +738,6 @@ static int ntsync_obj_get_fd(struct ntsync_obj *obj) + + static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp) + { +- struct ntsync_sem_args __user *user_args = argp; + struct ntsync_sem_args args; + struct ntsync_obj *sem; + int fd; +@@ -182,12 +754,398 @@ static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp) + sem->u.sem.count = args.count; + sem->u.sem.max = args.max; + fd = ntsync_obj_get_fd(sem); +- if (fd < 0) { ++ if (fd < 0) + kfree(sem); +- return fd; ++ ++ return fd; ++} ++ ++static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp) ++{ ++ struct ntsync_mutex_args args; ++ struct ntsync_obj *mutex; ++ int fd; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ if (!args.owner != !args.count) ++ return -EINVAL; ++ ++ mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX); ++ if (!mutex) ++ return -ENOMEM; ++ 
mutex->u.mutex.count = args.count; ++ mutex->u.mutex.owner = args.owner; ++ fd = ntsync_obj_get_fd(mutex); ++ if (fd < 0) ++ kfree(mutex); ++ ++ return fd; ++} ++ ++static int ntsync_create_event(struct ntsync_device *dev, void __user *argp) ++{ ++ struct ntsync_event_args args; ++ struct ntsync_obj *event; ++ int fd; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT); ++ if (!event) ++ return -ENOMEM; ++ event->u.event.manual = args.manual; ++ event->u.event.signaled = args.signaled; ++ fd = ntsync_obj_get_fd(event); ++ if (fd < 0) ++ kfree(event); ++ ++ return fd; ++} ++ ++static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd) ++{ ++ struct file *file = fget(fd); ++ struct ntsync_obj *obj; ++ ++ if (!file) ++ return NULL; ++ ++ if (file->f_op != &ntsync_obj_fops) { ++ fput(file); ++ return NULL; + } + +- return put_user(fd, &user_args->sem); ++ obj = file->private_data; ++ if (obj->dev != dev) { ++ fput(file); ++ return NULL; ++ } ++ ++ return obj; ++} ++ ++static void put_obj(struct ntsync_obj *obj) ++{ ++ fput(obj->file); ++} ++ ++static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args) ++{ ++ ktime_t timeout = ns_to_ktime(args->timeout); ++ clockid_t clock = CLOCK_MONOTONIC; ++ ktime_t *timeout_ptr; ++ int ret = 0; ++ ++ timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout); ++ ++ if (args->flags & NTSYNC_WAIT_REALTIME) ++ clock = CLOCK_REALTIME; ++ ++ do { ++ if (signal_pending(current)) { ++ ret = -ERESTARTSYS; ++ break; ++ } ++ ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (atomic_read(&q->signaled) != -1) { ++ ret = 0; ++ break; ++ } ++ ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock); ++ } while (ret < 0); ++ __set_current_state(TASK_RUNNING); ++ ++ return ret; ++} ++ ++/* ++ * Allocate and initialize the ntsync_q structure, but do not queue us yet. ++ */ ++static int setup_wait(struct ntsync_device *dev, ++ const struct ntsync_wait_args *args, bool all, ++ struct ntsync_q **ret_q) ++{ ++ int fds[NTSYNC_MAX_WAIT_COUNT + 1]; ++ const __u32 count = args->count; ++ struct ntsync_q *q; ++ __u32 total_count; ++ __u32 i, j; ++ ++ if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME)) ++ return -EINVAL; ++ ++ if (args->count > NTSYNC_MAX_WAIT_COUNT) ++ return -EINVAL; ++ ++ total_count = count; ++ if (args->alert) ++ total_count++; ++ ++ if (copy_from_user(fds, u64_to_user_ptr(args->objs), ++ array_size(count, sizeof(*fds)))) ++ return -EFAULT; ++ if (args->alert) ++ fds[count] = args->alert; ++ ++ q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL); ++ if (!q) ++ return -ENOMEM; ++ q->task = current; ++ q->owner = args->owner; ++ atomic_set(&q->signaled, -1); ++ q->all = all; ++ q->ownerdead = false; ++ q->count = count; ++ ++ for (i = 0; i < total_count; i++) { ++ struct ntsync_q_entry *entry = &q->entries[i]; ++ struct ntsync_obj *obj = get_obj(dev, fds[i]); ++ ++ if (!obj) ++ goto err; ++ ++ if (all) { ++ /* Check that the objects are all distinct. 
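++			 * A duplicate fd in a wait-all set would be
++			 * acquired twice by a single wakeup (e.g. a
++			 * semaphore decremented twice), so setup_wait()
++			 * rejects it with -EINVAL instead.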
*/ ++ for (j = 0; j < i; j++) { ++ if (obj == q->entries[j].obj) { ++ put_obj(obj); ++ goto err; ++ } ++ } ++ } ++ ++ entry->obj = obj; ++ entry->q = q; ++ entry->index = i; ++ } ++ ++ *ret_q = q; ++ return 0; ++ ++err: ++ for (j = 0; j < i; j++) ++ put_obj(q->entries[j].obj); ++ kfree(q); ++ return -EINVAL; ++} ++ ++static void try_wake_any_obj(struct ntsync_obj *obj) ++{ ++ switch (obj->type) { ++ case NTSYNC_TYPE_SEM: ++ try_wake_any_sem(obj); ++ break; ++ case NTSYNC_TYPE_MUTEX: ++ try_wake_any_mutex(obj); ++ break; ++ case NTSYNC_TYPE_EVENT: ++ try_wake_any_event(obj); ++ break; ++ } ++} ++ ++static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp) ++{ ++ struct ntsync_wait_args args; ++ __u32 i, total_count; ++ struct ntsync_q *q; ++ int signaled; ++ bool all; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ ret = setup_wait(dev, &args, false, &q); ++ if (ret < 0) ++ return ret; ++ ++ total_count = args.count; ++ if (args.alert) ++ total_count++; ++ ++ /* queue ourselves */ ++ ++ for (i = 0; i < total_count; i++) { ++ struct ntsync_q_entry *entry = &q->entries[i]; ++ struct ntsync_obj *obj = entry->obj; ++ ++ all = ntsync_lock_obj(dev, obj); ++ list_add_tail(&entry->node, &obj->any_waiters); ++ ntsync_unlock_obj(dev, obj, all); ++ } ++ ++ /* ++ * Check if we are already signaled. ++ * ++ * Note that the API requires that normal objects are checked before ++ * the alert event. Hence we queue the alert event last, and check ++ * objects in order. ++ */ ++ ++ for (i = 0; i < total_count; i++) { ++ struct ntsync_obj *obj = q->entries[i].obj; ++ ++ if (atomic_read(&q->signaled) != -1) ++ break; ++ ++ all = ntsync_lock_obj(dev, obj); ++ try_wake_any_obj(obj); ++ ntsync_unlock_obj(dev, obj, all); ++ } ++ ++ /* sleep */ ++ ++ ret = ntsync_schedule(q, &args); ++ ++ /* and finally, unqueue */ ++ ++ for (i = 0; i < total_count; i++) { ++ struct ntsync_q_entry *entry = &q->entries[i]; ++ struct ntsync_obj *obj = entry->obj; ++ ++ all = ntsync_lock_obj(dev, obj); ++ list_del(&entry->node); ++ ntsync_unlock_obj(dev, obj, all); ++ ++ put_obj(obj); ++ } ++ ++ signaled = atomic_read(&q->signaled); ++ if (signaled != -1) { ++ struct ntsync_wait_args __user *user_args = argp; ++ ++ /* even if we caught a signal, we need to communicate success */ ++ ret = q->ownerdead ? -EOWNERDEAD : 0; ++ ++ if (put_user(signaled, &user_args->index)) ++ ret = -EFAULT; ++ } else if (!ret) { ++ ret = -ETIMEDOUT; ++ } ++ ++ kfree(q); ++ return ret; ++} ++ ++static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp) ++{ ++ struct ntsync_wait_args args; ++ struct ntsync_q *q; ++ int signaled; ++ __u32 i; ++ int ret; ++ ++ if (copy_from_user(&args, argp, sizeof(args))) ++ return -EFAULT; ++ ++ ret = setup_wait(dev, &args, true, &q); ++ if (ret < 0) ++ return ret; ++ ++ /* queue ourselves */ ++ ++ mutex_lock(&dev->wait_all_lock); ++ ++ for (i = 0; i < args.count; i++) { ++ struct ntsync_q_entry *entry = &q->entries[i]; ++ struct ntsync_obj *obj = entry->obj; ++ ++ atomic_inc(&obj->all_hint); ++ ++ /* ++ * obj->all_waiters is protected by dev->wait_all_lock rather ++ * than obj->lock, so there is no need to acquire obj->lock ++ * here. 
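++		 * (Bumping all_hint just above, before the entry becomes
++		 * visible on all_waiters, is what forces concurrent wakers
++		 * off the spinlock-only fast path and into this lock's
++		 * serialized slow path.)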
++ */ ++ list_add_tail(&entry->node, &obj->all_waiters); ++ } ++ if (args.alert) { ++ struct ntsync_q_entry *entry = &q->entries[args.count]; ++ struct ntsync_obj *obj = entry->obj; ++ ++ dev_lock_obj(dev, obj); ++ list_add_tail(&entry->node, &obj->any_waiters); ++ dev_unlock_obj(dev, obj); ++ } ++ ++ /* check if we are already signaled */ ++ ++ try_wake_all(dev, q, NULL); ++ ++ mutex_unlock(&dev->wait_all_lock); ++ ++ /* ++ * Check if the alert event is signaled, making sure to do so only ++ * after checking if the other objects are signaled. ++ */ ++ ++ if (args.alert) { ++ struct ntsync_obj *obj = q->entries[args.count].obj; ++ ++ if (atomic_read(&q->signaled) == -1) { ++ bool all = ntsync_lock_obj(dev, obj); ++ try_wake_any_obj(obj); ++ ntsync_unlock_obj(dev, obj, all); ++ } ++ } ++ ++ /* sleep */ ++ ++ ret = ntsync_schedule(q, &args); ++ ++ /* and finally, unqueue */ ++ ++ mutex_lock(&dev->wait_all_lock); ++ ++ for (i = 0; i < args.count; i++) { ++ struct ntsync_q_entry *entry = &q->entries[i]; ++ struct ntsync_obj *obj = entry->obj; ++ ++ /* ++ * obj->all_waiters is protected by dev->wait_all_lock rather ++ * than obj->lock, so there is no need to acquire it here. ++ */ ++ list_del(&entry->node); ++ ++ atomic_dec(&obj->all_hint); ++ ++ put_obj(obj); ++ } ++ ++ mutex_unlock(&dev->wait_all_lock); ++ ++ if (args.alert) { ++ struct ntsync_q_entry *entry = &q->entries[args.count]; ++ struct ntsync_obj *obj = entry->obj; ++ bool all; ++ ++ all = ntsync_lock_obj(dev, obj); ++ list_del(&entry->node); ++ ntsync_unlock_obj(dev, obj, all); ++ ++ put_obj(obj); ++ } ++ ++ signaled = atomic_read(&q->signaled); ++ if (signaled != -1) { ++ struct ntsync_wait_args __user *user_args = argp; ++ ++ /* even if we caught a signal, we need to communicate success */ ++ ret = q->ownerdead ? 
-EOWNERDEAD : 0; ++ ++ if (put_user(signaled, &user_args->index)) ++ ret = -EFAULT; ++ } else if (!ret) { ++ ret = -ETIMEDOUT; ++ } ++ ++ kfree(q); ++ return ret; + } + + static int ntsync_char_open(struct inode *inode, struct file *file) +@@ -198,6 +1156,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file) + if (!dev) + return -ENOMEM; + ++ mutex_init(&dev->wait_all_lock); ++ + file->private_data = dev; + dev->file = file; + return nonseekable_open(inode, file); +@@ -219,8 +1179,16 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd, + void __user *argp = (void __user *)parm; + + switch (cmd) { ++ case NTSYNC_IOC_CREATE_EVENT: ++ return ntsync_create_event(dev, argp); ++ case NTSYNC_IOC_CREATE_MUTEX: ++ return ntsync_create_mutex(dev, argp); + case NTSYNC_IOC_CREATE_SEM: + return ntsync_create_sem(dev, argp); ++ case NTSYNC_IOC_WAIT_ALL: ++ return ntsync_wait_all(dev, argp); ++ case NTSYNC_IOC_WAIT_ANY: ++ return ntsync_wait_any(dev, argp); + default: + return -ENOIOCTLCMD; + } +diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c +index be67382c00f6..2b13cc31d1dd 100644 +--- a/drivers/net/wireless/ath/ath11k/core.c ++++ b/drivers/net/wireless/ath/ath11k/core.c +@@ -724,7 +724,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { + .name = "qca2066 hw2.1", + .hw_rev = ATH11K_HW_QCA2066_HW21, + .fw = { +- .dir = "QCA2066/hw2.1", ++ .dir = "QCA206X/hw2.1", + .board_size = 256 * 1024, + .cal_offset = 128 * 1024, + }, +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +index e4395b1f8c11..d2caa80e9412 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = { + BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC), + BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC), +- BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC), ++ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC), + BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC), + BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC), +@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = { + BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC), + BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC), + BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC), +- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC), ++ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA), +diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile +index 038ccbd9e3ba..de5e4f5145af 100644 +--- a/drivers/pci/controller/Makefile ++++ b/drivers/pci/controller/Makefile +@@ -1,4 +1,10 @@ + # SPDX-License-Identifier: GPL-2.0 ++ifdef CONFIG_X86_64 ++ifdef CONFIG_SATA_AHCI ++obj-y += intel-nvme-remap.o ++endif ++endif ++ + obj-$(CONFIG_PCIE_CADENCE) += cadence/ + obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o + obj-$(CONFIG_PCI_IXP4XX) += pci-ixp4xx.o +diff --git a/drivers/pci/controller/intel-nvme-remap.c b/drivers/pci/controller/intel-nvme-remap.c +new file mode 100644 +index 000000000000..e105e6f5cc91 +--- /dev/null ++++ 
b/drivers/pci/controller/intel-nvme-remap.c +@@ -0,0 +1,462 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Intel remapped NVMe device support. ++ * ++ * Copyright (c) 2019 Endless Mobile, Inc. ++ * Author: Daniel Drake ++ * ++ * Some products ship by default with the SATA controller in "RAID" or ++ * "Intel RST Premium With Intel Optane System Acceleration" mode. Under this ++ * mode, which we refer to as "remapped NVMe" mode, any installed NVMe ++ * devices disappear from the PCI bus, and instead their I/O memory becomes ++ * available within the AHCI device BARs. ++ * ++ * This scheme is understood to be a way of avoiding usage of the standard ++ * Windows NVMe driver under that OS, instead mandating usage of Intel's ++ * driver instead, which has better power management, and presumably offers ++ * some RAID/disk-caching solutions too. ++ * ++ * Here in this driver, we support the remapped NVMe mode by claiming the ++ * AHCI device and creating a fake PCIe root port. On the new bus, the ++ * original AHCI device is exposed with only minor tweaks. Then, fake PCI ++ * devices corresponding to the remapped NVMe devices are created. The usual ++ * ahci and nvme drivers are then expected to bind to these devices and ++ * operate as normal. ++ * ++ * The PCI configuration space for the NVMe devices is completely ++ * unavailable, so we fake a minimal one and hope for the best. ++ * ++ * Interrupts are shared between the AHCI and NVMe devices. For simplicity, ++ * we only support the legacy interrupt here, although MSI support ++ * could potentially be added later. ++ */ ++ ++#define MODULE_NAME "intel-nvme-remap" ++ ++#include ++#include ++#include ++#include ++#include ++ ++#define AHCI_PCI_BAR_STANDARD 5 ++ ++struct nvme_remap_dev { ++ struct pci_dev *dev; /* AHCI device */ ++ struct pci_bus *bus; /* our fake PCI bus */ ++ struct pci_sysdata sysdata; ++ int irq_base; /* our fake interrupts */ ++ ++ /* ++ * When we detect an all-ones write to a BAR register, this flag ++ * is set, so that we return the BAR size on the next read (a ++ * standard PCI behaviour). ++ * This includes the assumption that an all-ones BAR write is ++ * immediately followed by a read of the same register. ++ */ ++ bool bar_sizing; ++ ++ /* ++ * Resources copied from the AHCI device, to be regarded as ++ * resources on our fake bus. ++ */ ++ struct resource ahci_resources[PCI_NUM_RESOURCES]; ++ ++ /* Resources corresponding to the NVMe devices. */ ++ struct resource remapped_dev_mem[AHCI_MAX_REMAP]; ++ ++ /* Number of remapped NVMe devices found. */ ++ int num_remapped_devices; ++}; ++ ++static inline struct nvme_remap_dev *nrdev_from_bus(struct pci_bus *bus) ++{ ++ return container_of(bus->sysdata, struct nvme_remap_dev, sysdata); ++} ++ ++ ++/******** PCI configuration space **********/ ++ ++/* ++ * Helper macros for tweaking returned contents of PCI configuration space. ++ * ++ * value contains len bytes of data read from reg. ++ * If fixup_reg is included in that range, fix up the contents of that ++ * register to fixed_value. 
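++ * Worked example: nvme_remap_pci_read_slot0() below applies
++ * NR_FIX24(PCI_CLASS_PROG, PCI_CLASS_STORAGE_SATA_AHCI) to each result.
++ * For a 4-byte read at reg 0x08, the fixup range 0x09..0x0b falls inside
++ * the read, so bytes 1..3 of *value become 0x01, 0x06, 0x01 and the
++ * returned dword advertises class 01/06/01 (SATA AHCI) regardless of
++ * what the hardware reported.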
++ */ ++#define NR_FIX8(fixup_reg, fixed_value) do { \ ++ if (reg <= fixup_reg && fixup_reg < reg + len) \ ++ ((u8 *) value)[fixup_reg - reg] = (u8) (fixed_value); \ ++ } while (0) ++ ++#define NR_FIX16(fixup_reg, fixed_value) do { \ ++ NR_FIX8(fixup_reg, fixed_value); \ ++ NR_FIX8(fixup_reg + 1, fixed_value >> 8); \ ++ } while (0) ++ ++#define NR_FIX24(fixup_reg, fixed_value) do { \ ++ NR_FIX8(fixup_reg, fixed_value); \ ++ NR_FIX8(fixup_reg + 1, fixed_value >> 8); \ ++ NR_FIX8(fixup_reg + 2, fixed_value >> 16); \ ++ } while (0) ++ ++#define NR_FIX32(fixup_reg, fixed_value) do { \ ++ NR_FIX16(fixup_reg, (u16) fixed_value); \ ++ NR_FIX16(fixup_reg + 2, fixed_value >> 16); \ ++ } while (0) ++ ++/* ++ * Read PCI config space of the slot 0 (AHCI) device. ++ * We pass through the read request to the underlying device, but ++ * tweak the results in some cases. ++ */ ++static int nvme_remap_pci_read_slot0(struct pci_bus *bus, int reg, ++ int len, u32 *value) ++{ ++ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); ++ struct pci_bus *ahci_dev_bus = nrdev->dev->bus; ++ int ret; ++ ++ ret = ahci_dev_bus->ops->read(ahci_dev_bus, nrdev->dev->devfn, ++ reg, len, value); ++ if (ret) ++ return ret; ++ ++ /* ++ * Adjust the device class, to prevent this driver from attempting to ++ * additionally probe the device we're simulating here. ++ */ ++ NR_FIX24(PCI_CLASS_PROG, PCI_CLASS_STORAGE_SATA_AHCI); ++ ++ /* ++ * Unset interrupt pin, otherwise ACPI tries to find routing ++ * info for our virtual IRQ, fails, and complains. ++ */ ++ NR_FIX8(PCI_INTERRUPT_PIN, 0); ++ ++ /* ++ * Truncate the AHCI BAR to not include the region that covers the ++ * hidden devices. This will cause the ahci driver to successfully ++ * probe th new device (instead of handing it over to this driver). ++ */ ++ if (nrdev->bar_sizing) { ++ NR_FIX32(PCI_BASE_ADDRESS_5, ~(SZ_16K - 1)); ++ nrdev->bar_sizing = false; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++/* ++ * Read PCI config space of a remapped device. ++ * Since the original PCI config space is inaccessible, we provide a minimal, ++ * fake config space instead. ++ */ ++static int nvme_remap_pci_read_remapped(struct pci_bus *bus, unsigned int port, ++ int reg, int len, u32 *value) ++{ ++ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); ++ struct resource *remapped_mem; ++ ++ if (port > nrdev->num_remapped_devices) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ *value = 0; ++ remapped_mem = &nrdev->remapped_dev_mem[port - 1]; ++ ++ /* Set a Vendor ID, otherwise Linux assumes no device is present */ ++ NR_FIX16(PCI_VENDOR_ID, PCI_VENDOR_ID_INTEL); ++ ++ /* Always appear on & bus mastering */ ++ NR_FIX16(PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); ++ ++ /* Set class so that nvme driver probes us */ ++ NR_FIX24(PCI_CLASS_PROG, PCI_CLASS_STORAGE_EXPRESS); ++ ++ if (nrdev->bar_sizing) { ++ NR_FIX32(PCI_BASE_ADDRESS_0, ++ ~(resource_size(remapped_mem) - 1)); ++ nrdev->bar_sizing = false; ++ } else { ++ resource_size_t mem_start = remapped_mem->start; ++ ++ mem_start |= PCI_BASE_ADDRESS_MEM_TYPE_64; ++ NR_FIX32(PCI_BASE_ADDRESS_0, mem_start); ++ mem_start >>= 32; ++ NR_FIX32(PCI_BASE_ADDRESS_1, mem_start); ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++/* Read PCI configuration space. 
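++ * (BAR sizing recap, since both read paths special-case it: the PCI core
++ * writes 0xffffffff to a BAR and reads it back; the device answers with
++ * ~(size - 1), so a SZ_16K region reads back 0xffffc000 and the core
++ * recovers size = ~0xffffc000 + 1 = 0x4000.)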
*/ ++static int nvme_remap_pci_read(struct pci_bus *bus, unsigned int devfn, ++ int reg, int len, u32 *value) ++{ ++ if (PCI_SLOT(devfn) == 0) ++ return nvme_remap_pci_read_slot0(bus, reg, len, value); ++ else ++ return nvme_remap_pci_read_remapped(bus, PCI_SLOT(devfn), ++ reg, len, value); ++} ++ ++/* ++ * Write PCI config space of the slot 0 (AHCI) device. ++ * Apart from the special case of BAR sizing, we disable all writes. ++ * Otherwise, the ahci driver could make changes (e.g. unset PCI bus master) ++ * that would affect the operation of the NVMe devices. ++ */ ++static int nvme_remap_pci_write_slot0(struct pci_bus *bus, int reg, ++ int len, u32 value) ++{ ++ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); ++ struct pci_bus *ahci_dev_bus = nrdev->dev->bus; ++ ++ if (reg >= PCI_BASE_ADDRESS_0 && reg <= PCI_BASE_ADDRESS_5) { ++ /* ++ * Writing all-ones to a BAR means that the size of the ++ * memory region is being checked. Flag this so that we can ++ * reply with an appropriate size on the next read. ++ */ ++ if (value == ~0) ++ nrdev->bar_sizing = true; ++ ++ return ahci_dev_bus->ops->write(ahci_dev_bus, ++ nrdev->dev->devfn, ++ reg, len, value); ++ } ++ ++ return PCIBIOS_SET_FAILED; ++} ++ ++/* ++ * Write PCI config space of a remapped device. ++ * Since the original PCI config space is inaccessible, we reject all ++ * writes, except for the special case of BAR probing. ++ */ ++static int nvme_remap_pci_write_remapped(struct pci_bus *bus, ++ unsigned int port, ++ int reg, int len, u32 value) ++{ ++ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); ++ ++ if (port > nrdev->num_remapped_devices) ++ return PCIBIOS_DEVICE_NOT_FOUND; ++ ++ /* ++ * Writing all-ones to a BAR means that the size of the memory ++ * region is being checked. Flag this so that we can reply with ++ * an appropriate size on the next read. ++ */ ++ if (value == ~0 && reg >= PCI_BASE_ADDRESS_0 ++ && reg <= PCI_BASE_ADDRESS_5) { ++ nrdev->bar_sizing = true; ++ return PCIBIOS_SUCCESSFUL; ++ } ++ ++ return PCIBIOS_SET_FAILED; ++} ++ ++/* Write PCI configuration space. */ ++static int nvme_remap_pci_write(struct pci_bus *bus, unsigned int devfn, ++ int reg, int len, u32 value) ++{ ++ if (PCI_SLOT(devfn) == 0) ++ return nvme_remap_pci_write_slot0(bus, reg, len, value); ++ else ++ return nvme_remap_pci_write_remapped(bus, PCI_SLOT(devfn), ++ reg, len, value); ++} ++ ++static struct pci_ops nvme_remap_pci_ops = { ++ .read = nvme_remap_pci_read, ++ .write = nvme_remap_pci_write, ++}; ++ ++ ++/******** Initialization & exit **********/ ++ ++/* ++ * Find a PCI domain ID to use for our fake bus. ++ * Start at 0x10000 to not clash with ACPI _SEG domains (16 bits). ++ */ ++static int find_free_domain(void) ++{ ++ int domain = 0xffff; ++ struct pci_bus *bus = NULL; ++ ++ while ((bus = pci_find_next_bus(bus)) != NULL) ++ domain = max_t(int, domain, pci_domain_nr(bus)); ++ ++ return domain + 1; ++} ++ ++static int find_remapped_devices(struct nvme_remap_dev *nrdev, ++ struct list_head *resources) ++{ ++ void __iomem *mmio; ++ int i, count = 0; ++ u32 cap; ++ ++ mmio = pcim_iomap(nrdev->dev, AHCI_PCI_BAR_STANDARD, ++ pci_resource_len(nrdev->dev, ++ AHCI_PCI_BAR_STANDARD)); ++ if (!mmio) ++ return -ENODEV; ++ ++ /* Check if this device might have remapped nvme devices. 
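++	 * As used here, AHCI_VSCAP bit 0 advertises the vendor-specific
++	 * remap capability and AHCI_REMAP_CAP is a bitmask of remap slots;
++	 * each set bit is only trusted if the slot's device class register
++	 * reads back as PCI_CLASS_STORAGE_EXPRESS (an NVMe function).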
*/ ++ if (pci_resource_len(nrdev->dev, AHCI_PCI_BAR_STANDARD) < SZ_512K || ++ !(readl(mmio + AHCI_VSCAP) & 1)) ++ return -ENODEV; ++ ++ cap = readq(mmio + AHCI_REMAP_CAP); ++ for (i = AHCI_MAX_REMAP-1; i >= 0; i--) { ++ struct resource *remapped_mem; ++ ++ if ((cap & (1 << i)) == 0) ++ continue; ++ if (readl(mmio + ahci_remap_dcc(i)) ++ != PCI_CLASS_STORAGE_EXPRESS) ++ continue; ++ ++ /* We've found a remapped device */ ++ remapped_mem = &nrdev->remapped_dev_mem[count++]; ++ remapped_mem->start = ++ pci_resource_start(nrdev->dev, AHCI_PCI_BAR_STANDARD) ++ + ahci_remap_base(i); ++ remapped_mem->end = remapped_mem->start ++ + AHCI_REMAP_N_SIZE - 1; ++ remapped_mem->flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; ++ pci_add_resource(resources, remapped_mem); ++ } ++ ++ pcim_iounmap(nrdev->dev, mmio); ++ ++ if (count == 0) ++ return -ENODEV; ++ ++ nrdev->num_remapped_devices = count; ++ dev_info(&nrdev->dev->dev, "Found %d remapped NVMe devices\n", ++ nrdev->num_remapped_devices); ++ return 0; ++} ++ ++static void nvme_remap_remove_root_bus(void *data) ++{ ++ struct pci_bus *bus = data; ++ ++ pci_stop_root_bus(bus); ++ pci_remove_root_bus(bus); ++} ++ ++static int nvme_remap_probe(struct pci_dev *dev, ++ const struct pci_device_id *id) ++{ ++ struct nvme_remap_dev *nrdev; ++ LIST_HEAD(resources); ++ int i; ++ int ret; ++ struct pci_dev *child; ++ ++ nrdev = devm_kzalloc(&dev->dev, sizeof(*nrdev), GFP_KERNEL); ++ nrdev->sysdata.domain = find_free_domain(); ++ nrdev->sysdata.nvme_remap_dev = dev; ++ nrdev->dev = dev; ++ pci_set_drvdata(dev, nrdev); ++ ++ ret = pcim_enable_device(dev); ++ if (ret < 0) ++ return ret; ++ ++ pci_set_master(dev); ++ ++ ret = find_remapped_devices(nrdev, &resources); ++ if (ret) ++ return ret; ++ ++ /* Add resources from the original AHCI device */ ++ for (i = 0; i < PCI_NUM_RESOURCES; i++) { ++ struct resource *res = &dev->resource[i]; ++ ++ if (res->start) { ++ struct resource *nr_res = &nrdev->ahci_resources[i]; ++ ++ nr_res->start = res->start; ++ nr_res->end = res->end; ++ nr_res->flags = res->flags; ++ pci_add_resource(&resources, nr_res); ++ } ++ } ++ ++ /* Create virtual interrupts */ ++ nrdev->irq_base = devm_irq_alloc_descs(&dev->dev, -1, 0, ++ nrdev->num_remapped_devices + 1, ++ 0); ++ if (nrdev->irq_base < 0) ++ return nrdev->irq_base; ++ ++ /* Create and populate PCI bus */ ++ nrdev->bus = pci_create_root_bus(&dev->dev, 0, &nvme_remap_pci_ops, ++ &nrdev->sysdata, &resources); ++ if (!nrdev->bus) ++ return -ENODEV; ++ ++ if (devm_add_action_or_reset(&dev->dev, nvme_remap_remove_root_bus, ++ nrdev->bus)) ++ return -ENOMEM; ++ ++ /* We don't support sharing MSI interrupts between these devices */ ++ nrdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; ++ ++ pci_scan_child_bus(nrdev->bus); ++ ++ list_for_each_entry(child, &nrdev->bus->devices, bus_list) { ++ /* ++ * Prevent PCI core from trying to move memory BARs around. ++ * The hidden NVMe devices are at fixed locations. ++ */ ++ for (i = 0; i < PCI_NUM_RESOURCES; i++) { ++ struct resource *res = &child->resource[i]; ++ ++ if (res->flags & IORESOURCE_MEM) ++ res->flags |= IORESOURCE_PCI_FIXED; ++ } ++ ++ /* Share the legacy IRQ between all devices */ ++ child->irq = dev->irq; ++ } ++ ++ pci_assign_unassigned_bus_resources(nrdev->bus); ++ pci_bus_add_devices(nrdev->bus); ++ ++ return 0; ++} ++ ++static const struct pci_device_id nvme_remap_ids[] = { ++ /* ++ * Match all Intel RAID controllers. 
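++	 * (.class = PCI_CLASS_STORAGE_RAID << 8 with .class_mask =
++	 * 0xffffff00 matches on base class 0x01 / subclass 0x04 and
++	 * ignores the programming-interface byte.)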
++ * ++ * There's overlap here with the set of devices detected by the ahci ++ * driver, but ahci will only successfully probe when there ++ * *aren't* any remapped NVMe devices, and this driver will only ++ * successfully probe when there *are* remapped NVMe devices that ++ * need handling. ++ */ ++ { ++ PCI_VDEVICE(INTEL, PCI_ANY_ID), ++ .class = PCI_CLASS_STORAGE_RAID << 8, ++ .class_mask = 0xffffff00, + }, -+#endif /* V4L2_PIX_FMT_YUV565 */ ++ {0,} ++}; ++MODULE_DEVICE_TABLE(pci, nvme_remap_ids); ++ ++static struct pci_driver nvme_remap_drv = { ++ .name = MODULE_NAME, ++ .id_table = nvme_remap_ids, ++ .probe = nvme_remap_probe, ++}; ++module_pci_driver(nvme_remap_drv); ++ ++MODULE_AUTHOR("Daniel Drake "); ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c +index 76f4df75b08a..49c1a91c611d 100644 +--- a/drivers/pci/quirks.c ++++ b/drivers/pci/quirks.c +@@ -3746,6 +3746,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev) + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + } + ++static bool acs_on_downstream; ++static bool acs_on_multifunction; ++ ++#define NUM_ACS_IDS 16 ++struct acs_on_id { ++ unsigned short vendor; ++ unsigned short device; ++}; ++static struct acs_on_id acs_on_ids[NUM_ACS_IDS]; ++static u8 max_acs_id; ++ ++static __init int pcie_acs_override_setup(char *p) ++{ ++ if (!p) ++ return -EINVAL; ++ ++ while (*p) { ++ if (!strncmp(p, "downstream", 10)) ++ acs_on_downstream = true; ++ if (!strncmp(p, "multifunction", 13)) ++ acs_on_multifunction = true; ++ if (!strncmp(p, "id:", 3)) { ++ char opt[5]; ++ int ret; ++ long val; ++ ++ if (max_acs_id >= NUM_ACS_IDS - 1) { ++ pr_warn("Out of PCIe ACS override slots (%d)\n", ++ NUM_ACS_IDS); ++ goto next; ++ } ++ ++ p += 3; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].vendor = val; ++ ++ p += strcspn(p, ":"); ++ if (*p != ':') { ++ pr_warn("PCIe ACS invalid ID\n"); ++ goto next; ++ } ++ ++ p++; ++ snprintf(opt, 5, "%s", p); ++ ret = kstrtol(opt, 16, &val); ++ if (ret) { ++ pr_warn("PCIe ACS ID parse error %d\n", ret); ++ goto next; ++ } ++ acs_on_ids[max_acs_id].device = val; ++ max_acs_id++; ++ } ++next: ++ p += strcspn(p, ","); ++ if (*p == ',') ++ p++; ++ } ++ ++ if (acs_on_downstream || acs_on_multifunction || max_acs_id) ++ pr_warn("Warning: PCIe ACS overrides enabled; This may allow non-IOMMU protected peer-to-peer DMA\n"); ++ ++ return 0; ++} ++early_param("pcie_acs_override", pcie_acs_override_setup); ++ ++static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags) ++{ ++ int i; ++ ++ /* Never override ACS for legacy devices or devices with ACS caps */ ++ if (!pci_is_pcie(dev) || ++ pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS)) ++ return -ENOTTY; ++ ++ for (i = 0; i < max_acs_id; i++) ++ if (acs_on_ids[i].vendor == dev->vendor && ++ acs_on_ids[i].device == dev->device) ++ return 1; ++ ++ switch (pci_pcie_type(dev)) { ++ case PCI_EXP_TYPE_DOWNSTREAM: ++ case PCI_EXP_TYPE_ROOT_PORT: ++ if (acs_on_downstream) ++ return 1; ++ break; ++ case PCI_EXP_TYPE_ENDPOINT: ++ case PCI_EXP_TYPE_UPSTREAM: ++ case PCI_EXP_TYPE_LEG_END: ++ case PCI_EXP_TYPE_RC_END: ++ if (acs_on_multifunction && dev->multifunction) ++ return 1; ++ } ++ ++ return -ENOTTY; ++} + /* + * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be + * prevented for those affected devices. 
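
The parser above yields the usual ACS-override boot syntax: a comma-separated list in which "downstream", "multifunction" and repeated "id:vvvv:dddd" entries (four hex digits each, bounded by NUM_ACS_IDS slots) may be combined. An illustrative kernel command line:

	pcie_acs_override=downstream,multifunction,id:8086:2822
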
+@@ -5170,6 +5270,7 @@ static const struct pci_dev_acs_enabled {
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+ /* Wangxun nics */
+ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
++ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides },
+ { 0 }
+ };
+
+diff --git a/drivers/pci/vgaarb.c b/drivers/pci/vgaarb.c
+index 78748e8d2dba..2b2b558cebe6 100644
+--- a/drivers/pci/vgaarb.c
++++ b/drivers/pci/vgaarb.c
+@@ -143,6 +143,7 @@ void vga_set_default_device(struct pci_dev *pdev)
+ pci_dev_put(vga_default);
+ vga_default = pci_dev_get(pdev);
+ }
++EXPORT_SYMBOL_GPL(vga_set_default_device);
+
+ /**
+ * vga_remove_vgacon - deactivate VGA console
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index 0258dd879d64..b160173a530e 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -267,6 +267,18 @@ config ASUS_WIRELESS
+ If you choose to compile this driver as a module the module will be
+ called asus-wireless.
+
++config ASUS_ARMOURY
++ tristate "ASUS Armoury driver"
++ depends on ASUS_WMI
++ select FW_ATTR_CLASS
++ help
++ Say Y here if you have a WMI-aware ASUS machine and would like to use the
++ firmware_attributes API to control various settings typically exposed in
++ the ASUS Armoury Crate application available on Windows.
++
++ To compile this driver as a module, choose M here: the module will
++ be called asus-armoury.
++
+ config ASUS_WMI
+ tristate "ASUS WMI Driver"
+ depends on ACPI_WMI
+@@ -289,6 +301,15 @@ config ASUS_WMI
+ To compile this driver as a module, choose M here: the module will
+ be called asus-wmi.
+
++config ASUS_WMI_DEPRECATED_ATTRS
++ bool "BIOS option support in WMI platform (DEPRECATED)"
++ depends on ASUS_WMI
++ default y
++ help
++ Say Y to expose the configurable BIOS options through the asus-wmi
++ driver. This can be used with or without the asus-armoury driver,
++ which provides the same attributes and more, with improved features.
++
+ config ASUS_NB_WMI
+ tristate "Asus Notebook WMI Driver"
+ depends on ASUS_WMI
+diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
+index e1b142947067..fe3e7e7dede8 100644
+--- a/drivers/platform/x86/Makefile
++++ b/drivers/platform/x86/Makefile
+@@ -32,6 +32,7 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
+ # ASUS
+ obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
+ obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
++obj-$(CONFIG_ASUS_ARMOURY) += asus-armoury.o
+ obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
+ obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
+ obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o
+diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
+index 1417e230edbd..e69785af8e1d 100644
+--- a/drivers/platform/x86/apple-gmux.c
++++ b/drivers/platform/x86/apple-gmux.c
+@@ -21,6 +21,7 @@
+ #include
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+@@ -107,6 +108,10 @@ struct apple_gmux_config {
+
+ # define MMIO_GMUX_MAX_BRIGHTNESS 0xffff
+
++static bool force_igd;
++module_param(force_igd, bool, 0);
++MODULE_PARM_DESC(force_igd, "Switch gpu to igd on module load. Make sure that you have apple-set-os set up and the iGPU is in `lspci -s 00:02.0`. (default: false) (bool)");
++
+ static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
+ {
+ return inb(gmux_data->iostart + port);
+ }
+@@ -945,6 +950,19 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
+ gmux_enable_interrupts(gmux_data);
+ gmux_read_switch_state(gmux_data);
+
++ if (force_igd) {
++ struct pci_dev *pdev;
++
++ pdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(2, 0));
++ if (pdev) {
++ pr_info("Switching to IGD\n");
++ gmux_switchto(VGA_SWITCHEROO_IGD);
++ vga_set_default_device(pdev);
++ } else {
++ pr_err("force_igd is set, but couldn't find the iGPU at 00:02.0! Is apple-set-os working?\n");
++ }
++ }
++
+ /*
+ * Retina MacBook Pros cannot switch the panel's AUX separately
+ * and need eDP pre-calibration. They are distinguishable from
+diff --git a/drivers/platform/x86/asus-armoury.c b/drivers/platform/x86/asus-armoury.c
+new file mode 100644
+index 000000000000..4dac91d02278
+--- /dev/null
++++ b/drivers/platform/x86/asus-armoury.c
+@@ -0,0 +1,1074 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Asus Armoury (WMI) attributes driver. This driver uses the fw_attributes
++ * class to expose the various WMI functions that many gaming and some
++ * non-gaming ASUS laptops have available.
++ * These typically don't fit anywhere else in sysfs, such as under the LED
++ * class or hwmon, and are set in Windows using the ASUS Armoury Crate tool.
++ *
++ * Copyright(C) 2024 Luke Jones
++ */
++
++#include "linux/cleanup.h"
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "asus-armoury.h"
++#include "firmware_attributes_class.h"
++
++#define DEBUG
++
++#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C"
++
++#define ASUS_MINI_LED_MODE_MASK 0x03
++/* Standard modes for devices with only on/off */
++#define ASUS_MINI_LED_OFF 0x00
++#define ASUS_MINI_LED_ON 0x01
++/* Like "on" but the effect is more vibrant or brighter */
++#define ASUS_MINI_LED_STRONG_MODE 0x02
++/* New modes for devices with 3 mini-led mode types */
++#define ASUS_MINI_LED_2024_WEAK 0x00
++#define ASUS_MINI_LED_2024_STRONG 0x01
++#define ASUS_MINI_LED_2024_OFF 0x02
++
++#define ASUS_POWER_CORE_MASK GENMASK(15, 8)
++#define ASUS_PERF_CORE_MASK GENMASK(7, 0)
++
++enum cpu_core_type {
++ CPU_CORE_PERF = 0,
++ CPU_CORE_POWER,
++};
++
++enum cpu_core_value {
++ CPU_CORE_DEFAULT = 0,
++ CPU_CORE_MIN,
++ CPU_CORE_MAX,
++ CPU_CORE_CURRENT,
++};
++
++#define CPU_PERF_CORE_COUNT_MIN 4
++#define CPU_POWR_CORE_COUNT_MIN 0
++
++/* Default limits for tunables available on ASUS ROG laptops */
++#define NVIDIA_BOOST_MIN 5
++#define NVIDIA_BOOST_MAX 25
++#define NVIDIA_TEMP_MIN 75
++#define NVIDIA_TEMP_MAX 87
++#define NVIDIA_POWER_MIN 0
++#define NVIDIA_POWER_MAX 70
++#define NVIDIA_POWER_DEFAULT 70
++#define PPT_CPU_LIMIT_MIN 5
++#define PPT_CPU_LIMIT_MAX 150
++#define PPT_CPU_LIMIT_DEFAULT 80
++#define PPT_PLATFORM_MIN 5
++#define PPT_PLATFORM_MAX 100
++#define PPT_PLATFORM_DEFAULT 80
++
++/* Tunables provided by ASUS for gaming laptops */
++struct rog_tunables {
++ u32 cpu_default;
++ u32 cpu_min;
++ u32 cpu_max;
++
++ u32 platform_default;
++ u32 platform_min;
++ u32 platform_max;
++
++ u32 ppt_pl1_spl; // cpu
++ u32 ppt_pl2_sppt; // cpu
++ u32 ppt_pl3_fppt; // cpu
++ u32 ppt_apu_sppt; // plat
++ u32 ppt_platform_sppt; // plat
++
++ u32 nv_boost_default;
++ u32 nv_boost_min;
++ u32 nv_boost_max;
++ u32 nv_dynamic_boost;
++
++ u32 nv_temp_default;
++ u32 nv_temp_min;
++ u32 nv_temp_max;
++ u32 nv_temp_target;
++
++ u32 dgpu_tgp_default;
++ u32 dgpu_tgp_min;
++ u32 dgpu_tgp_max;
++ u32 dgpu_tgp;
++
++ u32 cur_perf_cores;
++ u32 min_perf_cores;
++ u32 max_perf_cores;
++ u32 cur_power_cores;
++ u32 min_power_cores;
++ u32 max_power_cores;
++};
++
++static const struct class *fw_attr_class;
++
++struct asus_armoury_priv {
++ struct device *fw_attr_dev;
++ struct kset *fw_attr_kset;
++
++ struct rog_tunables *rog_tunables;
++ u32 mini_led_dev_id;
++ u32 gpu_mux_dev_id;
++
++ struct mutex mutex;
++};
++
++static struct asus_armoury_priv asus_armoury = {
++ .mutex = __MUTEX_INITIALIZER(asus_armoury.mutex)
++};
++
++struct fw_attrs_group {
++ bool pending_reboot;
++};
++
++static struct fw_attrs_group fw_attrs = {
++ .pending_reboot = false,
++};
++
++struct asus_attr_group {
++ const struct attribute_group *attr_group;
++ u32 wmi_devid;
++};
++
++static bool asus_wmi_is_present(u32 dev_id)
++{
++ u32 retval;
++ int status;
++
++ status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, &retval);
++ pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval);
++
++ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
++}
++
++static void asus_set_reboot_and_signal_event(void)
++{
++ fw_attrs.pending_reboot = true;
++ kobject_uevent(&asus_armoury.fw_attr_dev->kobj, KOBJ_CHANGE);
++}
++
++static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
++{
++ return sysfs_emit(buf, "%d\n", fw_attrs.pending_reboot);
++}
++
++static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
++
++static bool asus_bios_requires_reboot(struct kobj_attribute *attr)
++{
++ return !strcmp(attr->attr.name, "gpu_mux_mode") ||
++ !strcmp(attr->attr.name, "cores_performance") ||
++ !strcmp(attr->attr.name, "cores_efficiency") ||
++ !strcmp(attr->attr.name, "panel_hd_mode");
++}
++
++static int armoury_wmi_set_devstate(struct kobj_attribute *attr, u32 value, u32 wmi_dev)
++{
++ u32 result;
++ int err;
++
++ guard(mutex)(&asus_armoury.mutex);
++ err = asus_wmi_set_devstate(wmi_dev, value, &result);
++ if (err) {
++ pr_err("Failed to set %s: %d\n", attr->attr.name, err);
++ return err;
++ }
++ /*
++ * !1 is usually considered a fail by ASUS, but some WMI methods do use > 1
++ * to return a status code or similar.
++ */
++ if (result < 1) {
++ pr_err("Failed to set %s: (result): 0x%x\n", attr->attr.name, result);
++ return -EIO;
++ }
++
++ return 0;
++}
++
++/**
++ * attr_uint_store() - Send an unsigned int to a WMI method, checking it against the inclusive min/max.
++ * @kobj: Pointer to the driver object.
++ * @attr: Pointer to the attribute calling this function.
++ * @buf: The buffer to read from, this is parsed to `u32` type.
++ * @count: Required by sysfs attribute macros, passed in from the calling attribute.
++ * @min: Minimum accepted value. Below this returns -EINVAL.
++ * @max: Maximum accepted value. Above this returns -EINVAL.
++ * @store_value: Pointer to where the parsed value should be stored.
++ * @wmi_dev: The WMI function ID to use.
++ *
++ * This function is intended to be generic so it can be called from any "_store"
++ * attribute which works only with integers. The integer to be sent to the WMI method
++ * is range checked and an error returned if out of range.
++ *
++ * If the value is valid and the WMI call succeeds, the sysfs attribute is notified,
++ * and if asus_bios_requires_reboot() is true the reboot attribute is also notified.
++ *
++ * Returns: Either count, or an error.
++ */ ++static ssize_t attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, ++ size_t count, u32 min, u32 max, u32 *store_value, u32 wmi_dev) ++{ ++ u32 value; ++ int err; ++ ++ err = kstrtouint(buf, 10, &value); ++ if (err) ++ return err; ++ ++ if (value < min || value > max) ++ return -EINVAL; ++ ++ err = armoury_wmi_set_devstate(attr, value, wmi_dev); ++ if (err) ++ return err; ++ ++ if (store_value != NULL) ++ *store_value = value; ++ sysfs_notify(kobj, NULL, attr->attr.name); ++ ++ if (asus_bios_requires_reboot(attr)) ++ asus_set_reboot_and_signal_event(); ++ ++ return count; ++} ++ ++/* Mini-LED mode **************************************************************/ ++static ssize_t mini_led_mode_current_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ u32 value; ++ int err; ++ ++ err = asus_wmi_get_devstate_dsts(asus_armoury.mini_led_dev_id, &value); ++ if (err) ++ return err; ++ ++ value &= ASUS_MINI_LED_MODE_MASK; ++ ++ /* ++ * Remap the mode values to match previous generation mini-LED. The last gen ++ * WMI 0 == off, while on this version WMI 2 == off (flipped). ++ */ ++ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) { ++ switch (value) { ++ case ASUS_MINI_LED_2024_WEAK: ++ value = ASUS_MINI_LED_ON; ++ break; ++ case ASUS_MINI_LED_2024_STRONG: ++ value = ASUS_MINI_LED_STRONG_MODE; ++ break; ++ case ASUS_MINI_LED_2024_OFF: ++ value = ASUS_MINI_LED_OFF; ++ break; ++ } ++ } ++ ++ return sysfs_emit(buf, "%u\n", value); ++} ++ ++static ssize_t mini_led_mode_current_value_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ u32 mode; ++ int err; ++ ++ err = kstrtou32(buf, 10, &mode); ++ if (err) ++ return err; ++ ++ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE && ++ mode > ASUS_MINI_LED_ON) ++ return -EINVAL; ++ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2 && ++ mode > ASUS_MINI_LED_STRONG_MODE) ++ return -EINVAL; ++ ++ /* ++ * Remap the mode values so expected behaviour is the same as the last ++ * generation of mini-LED with 0 == off, 1 == on. 
++ */
++ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) {
++ switch (mode) {
++ case ASUS_MINI_LED_OFF:
++ mode = ASUS_MINI_LED_2024_OFF;
++ break;
++ case ASUS_MINI_LED_ON:
++ mode = ASUS_MINI_LED_2024_WEAK;
++ break;
++ case ASUS_MINI_LED_STRONG_MODE:
++ mode = ASUS_MINI_LED_2024_STRONG;
++ break;
++ }
++ }
++
++ err = armoury_wmi_set_devstate(attr, mode, asus_armoury.mini_led_dev_id);
++ if (err)
++ return err;
++
++ sysfs_notify(kobj, NULL, attr->attr.name);
++
++ return count;
++}
++
++static ssize_t mini_led_mode_possible_values_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ switch (asus_armoury.mini_led_dev_id) {
++ case ASUS_WMI_DEVID_MINI_LED_MODE:
++ return sysfs_emit(buf, "0;1\n");
++ case ASUS_WMI_DEVID_MINI_LED_MODE2:
++ return sysfs_emit(buf, "0;1;2\n");
++ }
++
++ return sysfs_emit(buf, "0\n");
++}
++
++ATTR_GROUP_ENUM_CUSTOM(mini_led_mode, "mini_led_mode", "Set the mini-LED backlight mode");
++
++static ssize_t gpu_mux_mode_current_value_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf,
++ size_t count)
++{
++ int result, err;
++ u32 optimus;
++
++ err = kstrtou32(buf, 10, &optimus);
++ if (err)
++ return err;
++
++ if (optimus > 1)
++ return -EINVAL;
++
++ if (asus_wmi_is_present(ASUS_WMI_DEVID_DGPU)) {
++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_DGPU, &result);
++ if (err)
++ return err;
++ if (result && !optimus) {
++ err = -ENODEV;
++ pr_warn("Cannot switch MUX to dGPU mode when dGPU is disabled: %02X %02X %d\n",
++ result, optimus, err);
++ return err;
++ }
++ }
++
++ if (asus_wmi_is_present(ASUS_WMI_DEVID_EGPU)) {
++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_EGPU, &result);
++ if (err)
++ return err;
++ if (result && !optimus) {
++ err = -ENODEV;
++ pr_warn("Cannot switch MUX to dGPU mode when eGPU is enabled: %d\n",
++ err);
++ return err;
++ }
++ }
++
++ err = armoury_wmi_set_devstate(attr, optimus, asus_armoury.gpu_mux_dev_id);
++ if (err)
++ return err;
++
++ sysfs_notify(kobj, NULL, attr->attr.name);
++ asus_set_reboot_and_signal_event();
++
++ return count;
++}
++WMI_SHOW_INT(gpu_mux_mode_current_value, "%d\n", asus_armoury.gpu_mux_dev_id);
++ATTR_GROUP_BOOL_CUSTOM(gpu_mux_mode, "gpu_mux_mode", "Set the GPU display MUX mode");
++
++/*
++ * A user may need to store the value twice: typically a first store, then a
++ * PCI bus rescan to activate power, then a second store so it saves correctly.
++ */
++static ssize_t dgpu_disable_current_value_store(struct kobject *kobj,
++ struct kobj_attribute *attr, const char *buf,
++ size_t count)
++{
++ int result, err;
++ u32 disable;
++
++ err = kstrtou32(buf, 10, &disable);
++ if (err)
++ return err;
++
++ if (disable > 1)
++ return -EINVAL;
++
++ if (asus_armoury.gpu_mux_dev_id) {
++ err = asus_wmi_get_devstate_dsts(asus_armoury.gpu_mux_dev_id, &result);
++ if (err)
++ return err;
++ if (!result && disable) {
++ err = -ENODEV;
++ pr_warn("Cannot disable dGPU when the MUX is in dGPU mode: %d\n", err);
++ return err;
++ }
++ // TODO: handle a > 1 result, should do a PCI rescan and run again
++ }
++
++ err = armoury_wmi_set_devstate(attr, disable, ASUS_WMI_DEVID_DGPU);
++ if (err)
++ return err;
++
++ sysfs_notify(kobj, NULL, attr->attr.name);
++
++ return count;
++}
++WMI_SHOW_INT(dgpu_disable_current_value, "%d\n", ASUS_WMI_DEVID_DGPU);
++ATTR_GROUP_BOOL_CUSTOM(dgpu_disable, "dgpu_disable", "Disable the dGPU");
++
++/* The ACPI call to enable the eGPU also disables the internal dGPU */
++static ssize_t egpu_enable_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result, err;
++ u32 enable;
++
++ err = kstrtou32(buf, 10, &enable);
++ if (err)
++ return err;
++
++ if (enable > 1)
++ return -EINVAL;
++
++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_EGPU_CONNECTED, &result);
++ if (err) {
++ pr_warn("Failed to get eGPU connection status: %d\n", err);
++ return err;
++ }
++
++ if (asus_armoury.gpu_mux_dev_id) {
++ err = asus_wmi_get_devstate_dsts(asus_armoury.gpu_mux_dev_id, &result);
++ if (err) {
++ pr_warn("Failed to get GPU MUX status: %d\n", err);
++ return err;
++ }
++ if (!result && enable) {
++ err = -ENODEV;
++ pr_warn("Cannot enable eGPU when the MUX is in dGPU mode: %d\n", err);
++ return err;
++ }
++ }
++
++ err = armoury_wmi_set_devstate(attr, enable, ASUS_WMI_DEVID_EGPU);
++ if (err)
++ return err;
++
++ sysfs_notify(kobj, NULL, attr->attr.name);
++
++ return count;
++}
++WMI_SHOW_INT(egpu_enable_current_value, "%d\n", ASUS_WMI_DEVID_EGPU);
++ATTR_GROUP_BOOL_CUSTOM(egpu_enable, "egpu_enable", "Enable the eGPU (also disables dGPU)");
++
++/* Device memory available to APU */
++
++static ssize_t apu_mem_current_value_show(struct kobject *kobj, struct kobj_attribute *attr,
++ char *buf)
++{
++ int err;
++ u32 mem;
++
++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_APU_MEM, &mem);
++ if (err)
++ return err;
++
++ switch (mem) {
++ case 0x100:
++ mem = 0;
++ break;
++ case 0x102:
++ mem = 1;
++ break;
++ case 0x103:
++ mem = 2;
++ break;
++ case 0x104:
++ mem = 3;
++ break;
++ case 0x105:
++ mem = 4;
++ break;
++ case 0x106:
++ /* This is out of order and looks wrong but is correct */
++ mem = 8;
++ break;
++ case 0x107:
++ mem = 5;
++ break;
++ case 0x108:
++ mem = 6;
++ break;
++ case 0x109:
++ mem = 7;
++ break;
++ default:
++ mem = 4;
++ break;
++ }
++
++ return sysfs_emit(buf, "%u\n", mem);
++}
++
++static ssize_t apu_mem_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
++ const char *buf, size_t count)
++{
++ int result, err;
++ u32 requested, mem;
++
++ result = kstrtou32(buf, 10, &requested);
++ if (result)
++ return result;
++
++ switch (requested) {
++ case 0:
++ mem = 0x000;
++ break;
++ case 1:
++ mem = 0x102;
++ break;
++ case 2:
++ mem = 0x103;
++ break;
++ case 3:
++ mem = 0x104;
++ break;
++ case 4:
++ mem = 0x105;
++ break;
++ case 5:
++ mem = 0x107;
++ break;
++ case 6:
++ mem = 0x108;
++ break;
++ case 7:
++ mem
= 0x109; ++ break; ++ case 8: ++ /* This is out of order and looks wrong but is correct */ ++ mem = 0x106; ++ break; ++ default: ++ return -EIO; ++ } ++ ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_APU_MEM, mem, &result); ++ if (err) { ++ pr_warn("Failed to set apu_mem: %d\n", err); ++ return err; ++ } ++ ++ pr_info("APU memory changed to %uGB, reboot required\n", requested); ++ sysfs_notify(kobj, NULL, attr->attr.name); ++ ++ asus_set_reboot_and_signal_event(); ++ ++ return count; ++} ++ ++static ssize_t apu_mem_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sysfs_emit(buf, "0;1;2;3;4;5;6;7;8\n"); ++} ++ATTR_GROUP_ENUM_CUSTOM(apu_mem, "apu_mem", "Set available system RAM (in GB) for the APU to use"); ++ ++static int init_max_cpu_cores(void) ++{ ++ u32 cores; ++ int err; ++ ++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_CORES_MAX, &cores); ++ if (err) ++ return err; ++ ++ cores &= ~ASUS_WMI_DSTS_PRESENCE_BIT; ++ asus_armoury.rog_tunables->max_power_cores = FIELD_GET(ASUS_POWER_CORE_MASK, cores); ++ asus_armoury.rog_tunables->max_perf_cores = FIELD_GET(ASUS_PERF_CORE_MASK, cores); ++ ++ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_CORES, &cores); ++ if (err) { ++ pr_err("Could not get CPU core count: error %d", err); ++ return err; ++ } ++ ++ asus_armoury.rog_tunables->cur_perf_cores = FIELD_GET(ASUS_PERF_CORE_MASK, cores); ++ asus_armoury.rog_tunables->cur_power_cores = FIELD_GET(ASUS_POWER_CORE_MASK, cores); ++ ++ asus_armoury.rog_tunables->min_perf_cores = CPU_PERF_CORE_COUNT_MIN; ++ asus_armoury.rog_tunables->min_power_cores = CPU_POWR_CORE_COUNT_MIN; ++ ++ return 0; ++} ++ ++static ssize_t cores_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, ++ enum cpu_core_type core_type, enum cpu_core_value core_value) ++{ ++ u32 cores; ++ ++ switch (core_value) { ++ case CPU_CORE_DEFAULT: ++ case CPU_CORE_MAX: ++ if (core_type == CPU_CORE_PERF) ++ return sysfs_emit(buf, "%d\n", ++ asus_armoury.rog_tunables->max_perf_cores); ++ else ++ return sysfs_emit(buf, "%d\n", ++ asus_armoury.rog_tunables->max_power_cores); ++ case CPU_CORE_MIN: ++ if (core_type == CPU_CORE_PERF) ++ return sysfs_emit(buf, "%d\n", ++ asus_armoury.rog_tunables->min_perf_cores); ++ else ++ return sysfs_emit(buf, "%d\n", ++ asus_armoury.rog_tunables->min_power_cores); ++ default: ++ break; ++ } ++ ++ if (core_type == CPU_CORE_PERF) ++ cores = asus_armoury.rog_tunables->cur_perf_cores; ++ else ++ cores = asus_armoury.rog_tunables->cur_power_cores; ++ ++ return sysfs_emit(buf, "%d\n", cores); ++} ++ ++static ssize_t cores_current_value_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, enum cpu_core_type core_type) ++{ ++ u32 new_cores, perf_cores, power_cores, out_val, min, max; ++ int result, err; ++ ++ result = kstrtou32(buf, 10, &new_cores); ++ if (result) ++ return result; ++ ++ if (core_type == CPU_CORE_PERF) { ++ perf_cores = new_cores; ++ power_cores = out_val = asus_armoury.rog_tunables->cur_power_cores; ++ min = asus_armoury.rog_tunables->min_perf_cores; ++ max = asus_armoury.rog_tunables->max_perf_cores; ++ } else { ++ perf_cores = asus_armoury.rog_tunables->cur_perf_cores; ++ power_cores = out_val = new_cores; ++ min = asus_armoury.rog_tunables->min_power_cores; ++ max = asus_armoury.rog_tunables->max_power_cores; ++ } ++ ++ if (new_cores < min || new_cores > max) ++ return -EINVAL; ++ ++ out_val = 0; ++ out_val |= FIELD_PREP(ASUS_PERF_CORE_MASK, perf_cores); ++ out_val |= FIELD_PREP(ASUS_POWER_CORE_MASK, 
power_cores); ++ ++ mutex_lock(&asus_armoury.mutex); ++ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_CORES, out_val, &result); ++ mutex_unlock(&asus_armoury.mutex); ++ ++ if (err) { ++ pr_warn("Failed to set CPU core count: %d\n", err); ++ return err; ++ } ++ ++ if (result > 1) { ++ pr_warn("Failed to set CPU core count (result): 0x%x\n", result); ++ return -EIO; ++ } ++ ++ pr_info("CPU core count changed, reboot required\n"); ++ sysfs_notify(kobj, NULL, attr->attr.name); ++ asus_set_reboot_and_signal_event(); ++ ++ return 0; ++} ++ ++static ssize_t cores_performance_min_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_MIN); ++} ++ ++static ssize_t cores_performance_max_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_MAX); ++} ++ ++static ssize_t cores_performance_default_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_DEFAULT); ++} ++ ++static ssize_t cores_performance_current_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_CURRENT); ++} ++ ++static ssize_t cores_performance_current_value_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int err; ++ ++ err = cores_current_value_store(kobj, attr, buf, CPU_CORE_PERF); ++ if (err) ++ return err; ++ ++ return count; ++} ++ATTR_GROUP_CORES_RW(cores_performance, "cores_performance", ++ "Set the max available performance cores"); ++ ++static ssize_t cores_efficiency_min_value_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_MIN); ++} ++ ++static ssize_t cores_efficiency_max_value_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_MAX); ++} ++ ++static ssize_t cores_efficiency_default_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_DEFAULT); ++} ++ ++static ssize_t cores_efficiency_current_value_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_CURRENT); ++} ++ ++static ssize_t cores_efficiency_current_value_store(struct kobject *kobj, ++ struct kobj_attribute *attr, const char *buf, ++ size_t count) ++{ ++ int err; + -+/* bayer formats */ -+#ifdef V4L2_PIX_FMT_SRGGB8 -+ { -+ .name = "Bayer RGGB 8bit", -+ .fourcc = V4L2_PIX_FMT_SRGGB8, -+ .depth = 8, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_SRGGB8 */ -+#ifdef V4L2_PIX_FMT_SGRBG8 -+ { -+ .name = "Bayer GRBG 8bit", -+ .fourcc = V4L2_PIX_FMT_SGRBG8, -+ .depth = 8, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_SGRBG8 */ -+#ifdef V4L2_PIX_FMT_SGBRG8 -+ { -+ .name = "Bayer GBRG 8bit", -+ .fourcc = V4L2_PIX_FMT_SGBRG8, -+ .depth = 8, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_SGBRG8 */ -+#ifdef V4L2_PIX_FMT_SBGGR8 -+ { -+ .name = "Bayer BA81 8bit", -+ .fourcc = V4L2_PIX_FMT_SBGGR8, -+ .depth = 8, -+ .flags = 0, -+ }, -+#endif /* V4L2_PIX_FMT_SBGGR8 */ ++ err = cores_current_value_store(kobj, attr, buf, CPU_CORE_POWER); ++ if (err) ++ return err; + -+ /* here come the planar formats 
*/
-+ {
-+ .name = "4:1:0, planar, Y-Cr-Cb",
-+ .fourcc = V4L2_PIX_FMT_YVU410,
-+ .depth = 9,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+ {
-+ .name = "4:2:0, planar, Y-Cr-Cb",
-+ .fourcc = V4L2_PIX_FMT_YVU420,
-+ .depth = 12,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+ {
-+ .name = "4:1:0, planar, Y-Cb-Cr",
-+ .fourcc = V4L2_PIX_FMT_YUV410,
-+ .depth = 9,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+ {
-+ .name = "4:2:0, planar, Y-Cb-Cr",
-+ .fourcc = V4L2_PIX_FMT_YUV420,
-+ .depth = 12,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+#ifdef V4L2_PIX_FMT_YUV422P
-+ {
-+ .name = "16 bpp YVU422 planar",
-+ .fourcc = V4L2_PIX_FMT_YUV422P,
-+ .depth = 16,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+#endif /* V4L2_PIX_FMT_YUV422P */
-+#ifdef V4L2_PIX_FMT_YUV411P
-+ {
-+ .name = "16 bpp YVU411 planar",
-+ .fourcc = V4L2_PIX_FMT_YUV411P,
-+ .depth = 16,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+#endif /* V4L2_PIX_FMT_YUV411P */
-+#ifdef V4L2_PIX_FMT_Y41P
-+ {
-+ .name = "12 bpp YUV 4:1:1",
-+ .fourcc = V4L2_PIX_FMT_Y41P,
-+ .depth = 12,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+#endif /* V4L2_PIX_FMT_Y41P */
-+#ifdef V4L2_PIX_FMT_NV12
-+ {
-+ .name = "12 bpp Y/CbCr 4:2:0 ",
-+ .fourcc = V4L2_PIX_FMT_NV12,
-+ .depth = 12,
-+ .flags = FORMAT_FLAGS_PLANAR,
-+ },
-+#endif /* V4L2_PIX_FMT_NV12 */
++
++ err = cores_current_value_store(kobj, attr, buf, CPU_CORE_POWER);
++ if (err)
++ return err;
++
++ return count;
++}
++ATTR_GROUP_CORES_RW(cores_efficiency, "cores_efficiency",
++ "Set the max available efficiency cores");
+
-+/* here come the compressed formats */
-+#ifdef V4L2_PIX_FMT_MJPEG
-+ {
-+ .name = "Motion-JPEG",
-+ .fourcc = V4L2_PIX_FMT_MJPEG,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_MJPEG */
-+#ifdef V4L2_PIX_FMT_JPEG
-+ {
-+ .name = "JFIF JPEG",
-+ .fourcc = V4L2_PIX_FMT_JPEG,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_JPEG */
-+#ifdef V4L2_PIX_FMT_DV
-+ {
-+ .name = "DV1394",
-+ .fourcc = V4L2_PIX_FMT_DV,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_DV */
-+#ifdef V4L2_PIX_FMT_MPEG
-+ {
-+ .name = "MPEG-1/2/4 Multiplexed",
-+ .fourcc = V4L2_PIX_FMT_MPEG,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_MPEG */
-+#ifdef V4L2_PIX_FMT_H264
-+ {
-+ .name = "H264 with start codes",
-+ .fourcc = V4L2_PIX_FMT_H264,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_H264 */
-+#ifdef V4L2_PIX_FMT_H264_NO_SC
-+ {
-+ .name = "H264 without start codes",
-+ .fourcc = V4L2_PIX_FMT_H264_NO_SC,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_H264_NO_SC */
-+#ifdef V4L2_PIX_FMT_H264_MVC
-+ {
-+ .name = "H264 MVC",
-+ .fourcc = V4L2_PIX_FMT_H264_MVC,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_H264_MVC */
-+#ifdef V4L2_PIX_FMT_H263
-+ {
-+ .name = "H263",
-+ .fourcc = V4L2_PIX_FMT_H263,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_H263 */
-+#ifdef V4L2_PIX_FMT_MPEG1
-+ {
-+ .name = "MPEG-1 ES",
-+ .fourcc = V4L2_PIX_FMT_MPEG1,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_MPEG1 */
-+#ifdef V4L2_PIX_FMT_MPEG2
-+ {
-+ .name = "MPEG-2 ES",
-+ .fourcc = V4L2_PIX_FMT_MPEG2,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_MPEG2 */
-+#ifdef V4L2_PIX_FMT_MPEG4
-+ {
-+ .name = "MPEG-4 part 2 ES",
-+ .fourcc = V4L2_PIX_FMT_MPEG4,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_MPEG4 */
-+#ifdef V4L2_PIX_FMT_XVID
-+ {
-+ .name = "Xvid",
-+ .fourcc = V4L2_PIX_FMT_XVID,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_XVID */
-+#ifdef V4L2_PIX_FMT_VC1_ANNEX_G
-+ {
-+ .name = "SMPTE 421M Annex G compliant stream",
-+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_G,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_VC1_ANNEX_G */
-+#ifdef V4L2_PIX_FMT_VC1_ANNEX_L
-+ {
-+ .name = "SMPTE 421M Annex L compliant stream",
-+ .fourcc = V4L2_PIX_FMT_VC1_ANNEX_L,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_VC1_ANNEX_L */
-+#ifdef V4L2_PIX_FMT_VP8
-+ {
-+ .name = "VP8",
-+ .fourcc = V4L2_PIX_FMT_VP8,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_VP8 */
-+#ifdef V4L2_PIX_FMT_VP9
-+ {
-+ .name = "VP9",
-+ .fourcc = V4L2_PIX_FMT_VP9,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_VP9 */
-+#ifdef V4L2_PIX_FMT_HEVC
-+ {
-+ .name = "HEVC",
-+ .fourcc = V4L2_PIX_FMT_HEVC,
-+ .depth = 32,
-+ .flags = FORMAT_FLAGS_COMPRESSED,
-+ },
-+#endif /* V4L2_PIX_FMT_HEVC */
+
++/* Simple attribute creation */
++ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, "ppt_pl1_spl", ASUS_WMI_DEVID_PPT_PL1_SPL, cpu_default,
++ cpu_min, cpu_max, 1, "Set the CPU slow package limit");
++ATTR_GROUP_ROG_TUNABLE(ppt_pl2_sppt, "ppt_pl2_sppt", ASUS_WMI_DEVID_PPT_PL2_SPPT, cpu_default,
++ cpu_min, cpu_max, 1, "Set the CPU fast package limit");
++ATTR_GROUP_ROG_TUNABLE(ppt_pl3_fppt, "ppt_pl3_fppt", ASUS_WMI_DEVID_PPT_FPPT, cpu_default, cpu_min,
++ cpu_max, 1, "Set the CPU fastest package limit");
++ATTR_GROUP_ROG_TUNABLE(ppt_apu_sppt, "ppt_apu_sppt", ASUS_WMI_DEVID_PPT_APU_SPPT,
++ platform_default, platform_min, platform_max, 1,
++ "Set the APU package limit");
++ATTR_GROUP_ROG_TUNABLE(ppt_platform_sppt, "ppt_platform_sppt", ASUS_WMI_DEVID_PPT_PLAT_SPPT,
++ platform_default, platform_min, platform_max, 1,
++ "Set the platform package limit");
++ATTR_GROUP_ROG_TUNABLE(nv_dynamic_boost, "nv_dynamic_boost", ASUS_WMI_DEVID_NV_DYN_BOOST,
++ nv_boost_default, nv_boost_min, nv_boost_max, 1,
++ "Set the Nvidia dynamic boost limit");
++ATTR_GROUP_ROG_TUNABLE(nv_temp_target, "nv_temp_target", ASUS_WMI_DEVID_NV_THERM_TARGET,
++ nv_temp_default, nv_temp_min, nv_temp_max, 1,
++ "Set the Nvidia max thermal limit");
++ATTR_GROUP_ROG_TUNABLE(dgpu_tgp, "dgpu_tgp", ASUS_WMI_DEVID_DGPU_SET_TGP, dgpu_tgp_default,
++ dgpu_tgp_min, dgpu_tgp_max, 1,
++ "Set the additional TGP on top of the base TGP");
++
++ATTR_GROUP_INT_VALUE_ONLY_RO(dgpu_base_tgp, "dgpu_base_tgp", ASUS_WMI_DEVID_DGPU_BASE_TGP,
++ "Read the base TGP value");
++
++ATTR_GROUP_ENUM_INT_RO(charge_mode, "charge_mode", ASUS_WMI_DEVID_CHARGE_MODE, "0;1;2",
++ "Show the current mode of charging");
++
++ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", ASUS_WMI_DEVID_BOOT_SOUND,
++ "Set the boot POST sound");
++ATTR_GROUP_BOOL_RW(mcu_powersave, "mcu_powersave", ASUS_WMI_DEVID_MCU_POWERSAVE,
++ "Set MCU powersaving mode");
++ATTR_GROUP_BOOL_RW(panel_od, "panel_overdrive", ASUS_WMI_DEVID_PANEL_OD,
++ "Set the panel refresh overdrive");
++ATTR_GROUP_BOOL_RW(panel_hd_mode, "panel_hd_mode", ASUS_WMI_DEVID_PANEL_HD,
++ "Set the panel HD mode to UHD<0> or FHD<1>");
++ATTR_GROUP_BOOL_RO(egpu_connected, "egpu_connected", ASUS_WMI_DEVID_EGPU_CONNECTED,
++ "Show the eGPU connection status");
++
++/* If an attribute does not require any special case handling add it here */
++static const struct asus_attr_group armoury_attr_groups[] = {
++ { &egpu_connected_attr_group,
ASUS_WMI_DEVID_EGPU_CONNECTED }, ++ { &egpu_enable_attr_group, ASUS_WMI_DEVID_EGPU }, ++ { &dgpu_disable_attr_group, ASUS_WMI_DEVID_DGPU }, ++ ++ { &ppt_pl1_spl_attr_group, ASUS_WMI_DEVID_PPT_PL1_SPL }, ++ { &ppt_pl2_sppt_attr_group, ASUS_WMI_DEVID_PPT_PL2_SPPT }, ++ { &ppt_pl3_fppt_attr_group, ASUS_WMI_DEVID_PPT_FPPT }, ++ { &ppt_apu_sppt_attr_group, ASUS_WMI_DEVID_PPT_APU_SPPT }, ++ { &ppt_platform_sppt_attr_group, ASUS_WMI_DEVID_PPT_PLAT_SPPT }, ++ { &nv_dynamic_boost_attr_group, ASUS_WMI_DEVID_NV_DYN_BOOST }, ++ { &nv_temp_target_attr_group, ASUS_WMI_DEVID_NV_THERM_TARGET }, ++ { &dgpu_base_tgp_attr_group, ASUS_WMI_DEVID_DGPU_BASE_TGP }, ++ { &dgpu_tgp_attr_group, ASUS_WMI_DEVID_DGPU_SET_TGP }, ++ { &apu_mem_attr_group, ASUS_WMI_DEVID_APU_MEM }, ++ { &cores_efficiency_attr_group, ASUS_WMI_DEVID_CORES_MAX }, ++ { &cores_performance_attr_group, ASUS_WMI_DEVID_CORES_MAX }, ++ ++ { &charge_mode_attr_group, ASUS_WMI_DEVID_CHARGE_MODE }, ++ { &boot_sound_attr_group, ASUS_WMI_DEVID_BOOT_SOUND }, ++ { &mcu_powersave_attr_group, ASUS_WMI_DEVID_MCU_POWERSAVE }, ++ { &panel_od_attr_group, ASUS_WMI_DEVID_PANEL_OD }, ++ { &panel_hd_mode_attr_group, ASUS_WMI_DEVID_PANEL_HD }, +}; -diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig -index ae23b317a64e..7417c480275a 100644 ---- a/drivers/mfd/Kconfig -+++ b/drivers/mfd/Kconfig -@@ -2414,5 +2414,16 @@ config MFD_RSMU_SPI - Additional drivers must be enabled in order to use the functionality - of the device. - -+config MFD_STEAMDECK -+ tristate "Valve Steam Deck" -+ select MFD_CORE -+ depends on ACPI -+ depends on X86_64 || COMPILE_TEST -+ help -+ This driver registers various MFD cells that expose aspects -+ of Steam Deck specific ACPI functionality. + -+ Say N here, unless you are running on Steam Deck hardware. 
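Each entry in the armoury_attr_groups table above becomes a directory under the firmware_attributes class device that asus_fw_attr_add() (below) registers. A minimal userspace sketch, assuming the conventional /sys/class/firmware-attributes layout and the "asus-armoury" device name taken from DRIVER_NAME — verify the exact path on a real machine before relying on it:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define ATTR "/sys/class/firmware-attributes/asus-armoury/attributes/panel_overdrive/current_value"

int main(void)
{
	char val[16];
	ssize_t n;
	int fd;

	/* Read the current panel overdrive state (0 or 1). */
	fd = open(ATTR, O_RDONLY);
	if (fd < 0) {
		perror("open " ATTR);
		return 1;
	}
	n = read(fd, val, sizeof(val) - 1);
	close(fd);
	if (n < 0)
		return 1;
	val[n] = '\0';
	printf("panel_overdrive = %s", val);

	/* Enable it; attr_uint_store() in the driver range-checks 0..1. */
	fd = open(ATTR, O_WRONLY);
	if (fd < 0) {
		perror("open for write (needs root)");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}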
++static int asus_fw_attr_add(void)
++{
++ int err, i;
++
++ err = fw_attributes_class_get(&fw_attr_class);
++ if (err)
++ return err;
++
++ asus_armoury.fw_attr_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
++ NULL, "%s", DRIVER_NAME);
++ if (IS_ERR(asus_armoury.fw_attr_dev)) {
++ err = PTR_ERR(asus_armoury.fw_attr_dev);
++ goto fail_class_get;
++ }
++
++ asus_armoury.fw_attr_kset = kset_create_and_add("attributes", NULL,
++ &asus_armoury.fw_attr_dev->kobj);
++ if (!asus_armoury.fw_attr_kset) {
++ err = -ENOMEM;
++ goto err_destroy_classdev;
++ }
++
++ err = sysfs_create_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
++ if (err) {
++ pr_err("Failed to create top-level sysfs attributes\n");
++ goto err_destroy_kset;
++ }
++
++ asus_armoury.mini_led_dev_id = 0;
++ if (asus_wmi_is_present(ASUS_WMI_DEVID_MINI_LED_MODE)) {
++ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
++ } else if (asus_wmi_is_present(ASUS_WMI_DEVID_MINI_LED_MODE2)) {
++ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
++ }
++
++ if (asus_armoury.mini_led_dev_id) {
++ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
++ if (err) {
++ pr_err("Failed to create sysfs group for mini_led\n");
++ goto err_remove_file;
++ }
++ }
++
++ asus_armoury.gpu_mux_dev_id = 0;
++ if (asus_wmi_is_present(ASUS_WMI_DEVID_GPU_MUX)) {
++ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX;
++ } else if (asus_wmi_is_present(ASUS_WMI_DEVID_GPU_MUX_VIVO)) {
++ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX_VIVO;
++ }
++
++ if (asus_armoury.gpu_mux_dev_id) {
++ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
++ if (err) {
++ pr_err("Failed to create sysfs group for gpu_mux\n");
++ goto err_remove_mini_led_group;
++ }
++ }
++
++ for (i = 0; i < ARRAY_SIZE(armoury_attr_groups); i++) {
++ if (!asus_wmi_is_present(armoury_attr_groups[i].wmi_devid))
++ continue;
++
++ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
++ armoury_attr_groups[i].attr_group);
++ if (err) {
++ pr_err("Failed to create sysfs group for %s\n",
++ armoury_attr_groups[i].attr_group->name);
++ goto err_remove_groups;
++ }
++ }
++
++ return 0;
++
++err_remove_groups:
++ while (--i >= 0) {
++ if (asus_wmi_is_present(armoury_attr_groups[i].wmi_devid))
++ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, armoury_attr_groups[i].attr_group);
++ }
++ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
++err_remove_mini_led_group:
++ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
++err_remove_file:
++ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
++err_destroy_kset:
++ kset_unregister(asus_armoury.fw_attr_kset);
++err_destroy_classdev:
++ device_destroy(fw_attr_class, MKDEV(0, 0));
++fail_class_get:
++ fw_attributes_class_put();
++ return err;
++}
++
++/* Init / exit ****************************************************************/
++
++/* Set up the min/max and defaults for ROG tunables */
++static void init_rog_tunables(struct rog_tunables *rog)
++{
++ u32 platform_default = PPT_PLATFORM_DEFAULT;
++ u32 cpu_default = PPT_CPU_LIMIT_DEFAULT;
++ u32 platform_max = PPT_PLATFORM_MAX;
++ u32 max_boost = NVIDIA_BOOST_MAX;
++ u32 cpu_max = PPT_CPU_LIMIT_MAX;
++ const char *product;
++
++ /*
++ * ASUS product_name contains everything required, e.g.,
++ * "ROG Flow X16 GV601VV_GV601VV_00185149B".
++ * The bulk of these defaults are gained from users reporting what
++ * ASUS Armoury Crate in Windows provides them.
++ * This should be turned into a table eventually.
++ */
++ product = dmi_get_system_info(DMI_PRODUCT_NAME);
++
++ if (strstr(product, "GA402R")) {
++ cpu_default = 125;
++ } else if (strstr(product, "13QY")) {
++ cpu_max = 250;
++ } else if (strstr(product, "X13")) {
++ cpu_max = 75;
++ cpu_default = 50;
++ } else if (strstr(product, "RC71") || strstr(product, "RC72")) {
++ cpu_max = 50;
++ cpu_default = 30;
++ } else if (strstr(product, "G814") || strstr(product, "G614") ||
++ strstr(product, "G834") || strstr(product, "G634")) {
++ cpu_max = 175;
++ } else if (strstr(product, "GA402X") || strstr(product, "GA403") ||
++ strstr(product, "FA507N") || strstr(product, "FA507X") ||
++ strstr(product, "FA707N") || strstr(product, "FA707X")) {
++ cpu_max = 90;
++ } else {
++ pr_notice("Using default CPU limits. Please report if these are not correct.\n");
++ }
++
++ if (strstr(product, "GZ301ZE"))
++ max_boost = 5;
++ else if (strstr(product, "FX507ZC4"))
++ max_boost = 15;
++ else if (strstr(product, "GU605"))
++ max_boost = 20;
++
++ /* ensure defaults for tunables */
++ rog->cpu_default = cpu_default;
++ rog->cpu_min = PPT_CPU_LIMIT_MIN;
++ rog->cpu_max = cpu_max;
++
++ rog->platform_default = platform_default;
++ rog->platform_min = PPT_PLATFORM_MIN;
++ rog->platform_max = platform_max;
++
++ rog->ppt_pl1_spl = cpu_default;
++ rog->ppt_pl2_sppt = cpu_default;
++ rog->ppt_pl3_fppt = cpu_default;
++ rog->ppt_apu_sppt = cpu_default;
++ rog->ppt_platform_sppt = platform_default;
++
++ rog->nv_boost_default = NVIDIA_BOOST_MAX;
++ rog->nv_boost_min = NVIDIA_BOOST_MIN;
++ rog->nv_boost_max = max_boost;
++ rog->nv_dynamic_boost = NVIDIA_BOOST_MIN;
++
++ rog->nv_temp_default = NVIDIA_TEMP_MAX;
++ rog->nv_temp_min = NVIDIA_TEMP_MIN;
++ rog->nv_temp_max = NVIDIA_TEMP_MAX;
++ rog->nv_temp_target = NVIDIA_TEMP_MIN;
++
++ rog->dgpu_tgp_default = NVIDIA_POWER_DEFAULT;
++ rog->dgpu_tgp_min = NVIDIA_POWER_MIN;
++ rog->dgpu_tgp_max = NVIDIA_POWER_MAX;
++ rog->dgpu_tgp = NVIDIA_POWER_MAX;
++}
++
++static int __init asus_fw_init(void)
++{
++ char *wmi_uid;
++ int err;
++
++ wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID);
++ if (!wmi_uid)
++ return -ENODEV;
++
++ /*
++ * if equal to "ASUSWMI" then it's DCTS that can't be used for this
++ * driver, DSTS is required.
++ */ ++ if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI)) ++ return -ENODEV; ++ ++ asus_armoury.rog_tunables = kzalloc(sizeof(struct rog_tunables), GFP_KERNEL); ++ if (!asus_armoury.rog_tunables) ++ return -ENOMEM; ++ ++ init_rog_tunables(asus_armoury.rog_tunables); ++ if (asus_wmi_is_present(ASUS_WMI_DEVID_CORES_MAX)) { ++ err = init_max_cpu_cores(); ++ if (err) { ++ kfree(asus_armoury.rog_tunables); ++ pr_err("Could not initialise CPU core control %d\n", err); ++ return err; ++ } ++ } ++ ++ err = asus_fw_attr_add(); ++ if (err) ++ return err; + - endmenu - endif -diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile -index e057d6d6faef..2f9f8a0daf5e 100644 ---- a/drivers/mfd/Makefile -+++ b/drivers/mfd/Makefile -@@ -290,3 +290,5 @@ obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o - - obj-$(CONFIG_MFD_RSMU_I2C) += rsmu_i2c.o rsmu_core.o - obj-$(CONFIG_MFD_RSMU_SPI) += rsmu_spi.o rsmu_core.o ++ return 0; ++} + -+obj-$(CONFIG_MFD_STEAMDECK) += steamdeck.o -diff --git a/drivers/mfd/steamdeck.c b/drivers/mfd/steamdeck.c ++static void __exit asus_fw_exit(void) ++{ ++ mutex_lock(&asus_armoury.mutex); ++ ++ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); ++ kset_unregister(asus_armoury.fw_attr_kset); ++ device_destroy(fw_attr_class, MKDEV(0, 0)); ++ fw_attributes_class_put(); ++ ++ mutex_unlock(&asus_armoury.mutex); ++} ++ ++module_init(asus_fw_init); ++module_exit(asus_fw_exit); ++ ++MODULE_IMPORT_NS("ASUS_WMI"); ++MODULE_AUTHOR("Luke Jones "); ++MODULE_DESCRIPTION("ASUS BIOS Configuration Driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("wmi:" ASUS_NB_WMI_EVENT_GUID); +diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h new file mode 100644 -index 000000000000..a60fa7db9141 +index 000000000000..2620708d3994 --- /dev/null -+++ b/drivers/mfd/steamdeck.c -@@ -0,0 +1,147 @@ -+// SPDX-License-Identifier: GPL-2.0+ -+ -+/* -+ * Steam Deck EC MFD core driver ++++ b/drivers/platform/x86/asus-armoury.h +@@ -0,0 +1,258 @@ ++/* SPDX-License-Identifier: GPL-2.0 + * -+ * Copyright (C) 2021-2022 Valve Corporation ++ * Definitions for kernel modules using asus-armoury driver + * ++ * Copyright (c) 2024 Luke Jones + */ + -+#include -+#include -+#include -+ -+#define STEAMDECK_STA_OK \ -+ (ACPI_STA_DEVICE_ENABLED | \ -+ ACPI_STA_DEVICE_PRESENT | \ -+ ACPI_STA_DEVICE_FUNCTIONING) ++#ifndef _ASUS_ARMOURY_H_ ++#define _ASUS_ARMOURY_H_ + -+struct steamdeck { -+ struct acpi_device *adev; -+ struct device *dev; -+}; ++#include ++#include + -+#define STEAMDECK_ATTR_RO(_name, _method) \ -+ static ssize_t _name##_show(struct device *dev, \ -+ struct device_attribute *attr, \ -+ char *buf) \ -+ { \ -+ struct steamdeck *sd = dev_get_drvdata(dev); \ -+ unsigned long long val; \ -+ \ -+ if (ACPI_FAILURE(acpi_evaluate_integer( \ -+ sd->adev->handle, \ -+ _method, NULL, &val))) \ -+ return -EIO; \ -+ \ -+ return sysfs_emit(buf, "%llu\n", val); \ -+ } \ -+ static DEVICE_ATTR_RO(_name) ++#define DRIVER_NAME "asus-armoury" + -+STEAMDECK_ATTR_RO(firmware_version, "PDFW"); -+STEAMDECK_ATTR_RO(board_id, "BOID"); ++static ssize_t attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t count, u32 min, u32 max, ++ u32 *store_value, u32 wmi_dev); + -+static ssize_t controller_board_power_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) ++static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) +{ -+ struct steamdeck *sd = dev_get_drvdata(dev); -+ bool 
enabled; -+ ssize_t ret = kstrtobool(buf, &enabled); -+ -+ if (ret) -+ return ret; -+ -+ if (ACPI_FAILURE(acpi_execute_simple_method(sd->adev->handle, -+ "SCBP", enabled))) -+ return -EIO; ++ return sysfs_emit(buf, "integer\n"); ++} + -+ return count; ++static ssize_t enum_type_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sysfs_emit(buf, "enumeration\n"); +} -+static DEVICE_ATTR_WO(controller_board_power); + -+static struct attribute *steamdeck_attrs[] = { -+ &dev_attr_firmware_version.attr, -+ &dev_attr_board_id.attr, -+ &dev_attr_controller_board_power.attr, -+ NULL -+}; ++#define __ASUS_ATTR_RO(_func, _name) \ ++ { \ ++ .attr = { .name = __stringify(_name), .mode = 0444 }, \ ++ .show = _func##_##_name##_show, \ ++ } + -+ATTRIBUTE_GROUPS(steamdeck); ++#define __ASUS_ATTR_RO_AS(_name, _show) \ ++ { \ ++ .attr = { .name = __stringify(_name), .mode = 0444 }, \ ++ .show = _show, \ ++ } + -+static const struct mfd_cell steamdeck_cells[] = { -+ { .name = "steamdeck-hwmon" }, -+ { .name = "steamdeck-leds" }, -+ { .name = "steamdeck-extcon" }, -+}; ++#define __ASUS_ATTR_RW(_func, _name) \ ++ __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store) + -+static void steamdeck_remove_sysfs_groups(void *data) -+{ -+ struct steamdeck *sd = data; ++#define __WMI_STORE_INT(_attr, _min, _max, _wmi) \ ++ static ssize_t _attr##_store(struct kobject *kobj, \ ++ struct kobj_attribute *attr, \ ++ const char *buf, size_t count) \ ++ { \ ++ return attr_uint_store(kobj, attr, buf, count, _min, _max, \ ++ NULL, _wmi); \ ++ } + -+ sysfs_remove_groups(&sd->dev->kobj, steamdeck_groups); -+} ++#define WMI_SHOW_INT(_attr, _fmt, _wmi) \ ++ static ssize_t _attr##_show(struct kobject *kobj, \ ++ struct kobj_attribute *attr, char *buf) \ ++ { \ ++ u32 result; \ ++ int err; \ ++ \ ++ err = asus_wmi_get_devstate_dsts(_wmi, &result); \ ++ if (err) \ ++ return err; \ ++ return sysfs_emit(buf, _fmt, \ ++ result & ~ASUS_WMI_DSTS_PRESENCE_BIT); \ ++ } + -+static int steamdeck_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ unsigned long long sta; -+ struct steamdeck *sd; -+ acpi_status status; -+ int ret; ++/* Create functions and attributes for use in other macros or on their own */ + -+ sd = devm_kzalloc(dev, sizeof(*sd), GFP_KERNEL); -+ if (!sd) -+ return -ENOMEM; -+ sd->adev = ACPI_COMPANION(dev); -+ sd->dev = dev; -+ platform_set_drvdata(pdev, sd); ++#define __ATTR_CURRENT_INT_RO(_attr, _wmi) \ ++ WMI_SHOW_INT(_attr##_current_value, "%d\n", _wmi); \ ++ static struct kobj_attribute attr_##_attr##_current_value = \ ++ __ASUS_ATTR_RO(_attr, current_value) + -+ status = acpi_evaluate_integer(sd->adev->handle, "_STA", -+ NULL, &sta); -+ if (ACPI_FAILURE(status)) { -+ dev_err(dev, "Status check failed (0x%x)\n", status); -+ return -EINVAL; -+ } ++#define __ATTR_CURRENT_INT_RW(_attr, _minv, _maxv, _wmi) \ ++ __WMI_STORE_INT(_attr##_current_value, _minv, _maxv, _wmi); \ ++ WMI_SHOW_INT(_attr##_current_value, "%d\n", _wmi); \ ++ static struct kobj_attribute attr_##_attr##_current_value = \ ++ __ASUS_ATTR_RW(_attr, current_value) + -+ if ((sta & STEAMDECK_STA_OK) != STEAMDECK_STA_OK) { -+ dev_err(dev, "Device is not ready\n"); -+ return -EINVAL; -+ } ++/* Shows a formatted static variable */ ++#define __ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \ ++ static ssize_t _attrname##_##_prop##_show( \ ++ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ ++ { \ ++ return sysfs_emit(buf, _fmt, _val); \ ++ } \ ++ static struct kobj_attribute 
attr_##_attrname##_##_prop = \ ++ __ASUS_ATTR_RO(_attrname, _prop) + -+ ret = sysfs_create_groups(&dev->kobj, steamdeck_groups); -+ if (ret) { -+ dev_err(dev, "Failed to create sysfs group\n"); -+ return ret; ++/* Requires current_value_show */ ++#define __ATTR_GROUP_INT_VALUE_ONLY(_attrname, _fsname, _dispname) \ ++ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ ++ static struct kobj_attribute attr_##_attrname##_type = \ ++ __ASUS_ATTR_RO_AS(type, int_type_show); \ ++ static struct attribute *_attrname##_attrs[] = { \ ++ &attr_##_attrname##_current_value.attr, \ ++ &attr_##_attrname##_display_name.attr, \ ++ &attr_##_attrname##_type.attr, NULL \ ++ }; \ ++ static const struct attribute_group _attrname##_attr_group = { \ ++ .name = _fsname, .attrs = _attrname##_attrs \ + } + -+ ret = devm_add_action_or_reset(dev, steamdeck_remove_sysfs_groups, -+ sd); -+ if (ret) { -+ dev_err(dev, "Failed to register devres action\n"); -+ return ret; ++/* Boolean style enumeration, base macro. Requires adding show/store */ ++#define __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) \ ++ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ ++ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \ ++ static struct kobj_attribute attr_##_attrname##_type = \ ++ __ASUS_ATTR_RO_AS(type, enum_type_show); \ ++ static struct attribute *_attrname##_attrs[] = { \ ++ &attr_##_attrname##_current_value.attr, \ ++ &attr_##_attrname##_display_name.attr, \ ++ &attr_##_attrname##_possible_values.attr, \ ++ &attr_##_attrname##_type.attr, \ ++ NULL \ ++ }; \ ++ static const struct attribute_group _attrname##_attr_group = { \ ++ .name = _fsname, .attrs = _attrname##_attrs \ + } + -+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, -+ steamdeck_cells, ARRAY_SIZE(steamdeck_cells), -+ NULL, 0, NULL); -+} ++#define ATTR_GROUP_INT_VALUE_ONLY_RO(_attrname, _fsname, _wmi, _dispname) \ ++ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ ++ __ATTR_GROUP_INT_VALUE_ONLY(_attrname, _fsname, _dispname) + -+static const struct acpi_device_id steamdeck_device_ids[] = { -+ { "VLV0100", 0 }, -+ { "", 0 }, -+}; -+MODULE_DEVICE_TABLE(acpi, steamdeck_device_ids); ++#define ATTR_GROUP_BOOL_RO(_attrname, _fsname, _wmi, _dispname) \ ++ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ ++ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) + -+static struct platform_driver steamdeck_driver = { -+ .probe = steamdeck_probe, -+ .driver = { -+ .name = "steamdeck", -+ .acpi_match_table = steamdeck_device_ids, -+ }, -+}; -+module_platform_driver(steamdeck_driver); ++#define ATTR_GROUP_BOOL_RW(_attrname, _fsname, _wmi, _dispname) \ ++ __ATTR_CURRENT_INT_RW(_attrname, 0, 1, _wmi); \ ++ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) + -+MODULE_AUTHOR("Andrey Smirnov "); -+MODULE_DESCRIPTION("Steam Deck EC MFD core driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig -index 09cbe3f0ab1e..fb772bfe27c3 100644 ---- a/drivers/misc/Kconfig -+++ b/drivers/misc/Kconfig -@@ -517,7 +517,6 @@ config OPEN_DICE - - config NTSYNC - tristate "NT synchronization primitive emulation" -- depends on BROKEN - help - This module provides kernel support for emulation of Windows NT - synchronization primitives. It is not a hardware driver. 
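To make the attribute-group macros added to asus-armoury.h above easier to follow, here is roughly what a single invocation such as ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", ASUS_WMI_DEVID_BOOT_SOUND, "Set the boot POST sound") expands to — a simplified sketch, not a literal preprocessor dump:

/* From __WMI_STORE_INT(boot_sound_current_value, 0, 1, ...): */
static ssize_t boot_sound_current_value_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	/* Range-checked write (0..1), forwarded to the WMI device. */
	return attr_uint_store(kobj, attr, buf, count, 0, 1, NULL,
			       ASUS_WMI_DEVID_BOOT_SOUND);
}

/* From WMI_SHOW_INT(boot_sound_current_value, "%d\n", ...): */
static ssize_t boot_sound_current_value_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	u32 result;
	int err;

	err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_BOOT_SOUND, &result);
	if (err)
		return err;
	return sysfs_emit(buf, "%d\n", result & ~ASUS_WMI_DSTS_PRESENCE_BIT);
}

static struct kobj_attribute attr_boot_sound_current_value =
	__ASUS_ATTR_RW(boot_sound, current_value);

/*
 * __ATTR_GROUP_ENUM() then adds display_name ("Set the boot POST sound"),
 * possible_values ("0;1") and type ("enumeration") attributes, and collects
 * everything into boot_sound_attr_group, exposed as "boot_sound" in sysfs.
 */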
-diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c -index 4954553b7baa..457ff28b789f 100644 ---- a/drivers/misc/ntsync.c -+++ b/drivers/misc/ntsync.c -@@ -6,11 +6,17 @@ - */ - - #include -+#include - #include - #include -+#include -+#include - #include - #include -+#include - #include -+#include -+#include - #include - #include - #include -@@ -19,6 +25,8 @@ - - enum ntsync_type { - NTSYNC_TYPE_SEM, -+ NTSYNC_TYPE_MUTEX, -+ NTSYNC_TYPE_EVENT, - }; - - /* -@@ -30,10 +38,13 @@ enum ntsync_type { - * - * Both rely on struct file for reference counting. Individual - * ntsync_obj objects take a reference to the device when created. -+ * Wait operations take a reference to each object being waited on for -+ * the duration of the wait. - */ - - struct ntsync_obj { - spinlock_t lock; -+ int dev_locked; - - enum ntsync_type type; - -@@ -46,22 +57,344 @@ struct ntsync_obj { - __u32 count; - __u32 max; - } sem; -+ struct { -+ __u32 count; -+ pid_t owner; -+ bool ownerdead; -+ } mutex; -+ struct { -+ bool manual; -+ bool signaled; -+ } event; - } u; ++/* ++ * Requires _current_value_show(), _current_value_show() ++ */ ++#define ATTR_GROUP_BOOL_CUSTOM(_attrname, _fsname, _dispname) \ ++ static struct kobj_attribute attr_##_attrname##_current_value = \ ++ __ASUS_ATTR_RW(_attrname, current_value); \ ++ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) + -+ /* -+ * any_waiters is protected by the object lock, but all_waiters is -+ * protected by the device wait_all_lock. -+ */ -+ struct list_head any_waiters; -+ struct list_head all_waiters; ++#define ATTR_GROUP_ENUM_INT_RO(_attrname, _fsname, _wmi, _possible, _dispname) \ ++ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ ++ __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) + -+ /* -+ * Hint describing how many tasks are queued on this object in a -+ * wait-all operation. -+ * -+ * Any time we do a wake, we may need to wake "all" waiters as well as -+ * "any" waiters. In order to atomically wake "all" waiters, we must -+ * lock all of the objects, and that means grabbing the wait_all_lock -+ * below (and, due to lock ordering rules, before locking this object). -+ * However, wait-all is a rare operation, and grabbing the wait-all -+ * lock for every wake would create unnecessary contention. -+ * Therefore we first check whether all_hint is zero, and, if it is, -+ * we skip trying to wake "all" waiters. -+ * -+ * Since wait requests must originate from user-space threads, we're -+ * limited here by PID_MAX_LIMIT, so there's no risk of overflow. 
-+ */ -+ atomic_t all_hint; -+}; ++/* ++ * Requires _current_value_show(), _current_value_show() ++ * and _possible_values_show() ++ */ ++#define ATTR_GROUP_ENUM_CUSTOM(_attrname, _fsname, _dispname) \ ++ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ ++ static struct kobj_attribute attr_##_attrname##_current_value = \ ++ __ASUS_ATTR_RW(_attrname, current_value); \ ++ static struct kobj_attribute attr_##_attrname##_possible_values = \ ++ __ASUS_ATTR_RO(_attrname, possible_values); \ ++ static struct kobj_attribute attr_##_attrname##_type = \ ++ __ASUS_ATTR_RO_AS(type, enum_type_show); \ ++ static struct attribute *_attrname##_attrs[] = { \ ++ &attr_##_attrname##_current_value.attr, \ ++ &attr_##_attrname##_display_name.attr, \ ++ &attr_##_attrname##_possible_values.attr, \ ++ &attr_##_attrname##_type.attr, \ ++ NULL \ ++ }; \ ++ static const struct attribute_group _attrname##_attr_group = { \ ++ .name = _fsname, .attrs = _attrname##_attrs \ ++ } + -+struct ntsync_q_entry { -+ struct list_head node; -+ struct ntsync_q *q; -+ struct ntsync_obj *obj; -+ __u32 index; -+}; ++/* CPU core attributes need a little different in setup */ ++#define ATTR_GROUP_CORES_RW(_attrname, _fsname, _dispname) \ ++ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", 1); \ ++ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ ++ static struct kobj_attribute attr_##_attrname##_current_value = \ ++ __ASUS_ATTR_RW(_attrname, current_value); \ ++ static struct kobj_attribute attr_##_attrname##_default_value = \ ++ __ASUS_ATTR_RO(_attrname, default_value); \ ++ static struct kobj_attribute attr_##_attrname##_min_value = \ ++ __ASUS_ATTR_RO(_attrname, min_value); \ ++ static struct kobj_attribute attr_##_attrname##_max_value = \ ++ __ASUS_ATTR_RO(_attrname, max_value); \ ++ static struct kobj_attribute attr_##_attrname##_type = \ ++ __ASUS_ATTR_RO_AS(type, int_type_show); \ ++ static struct attribute *_attrname##_attrs[] = { \ ++ &attr_##_attrname##_current_value.attr, \ ++ &attr_##_attrname##_default_value.attr, \ ++ &attr_##_attrname##_min_value.attr, \ ++ &attr_##_attrname##_max_value.attr, \ ++ &attr_##_attrname##_scalar_increment.attr, \ ++ &attr_##_attrname##_display_name.attr, \ ++ &attr_##_attrname##_type.attr, \ ++ NULL \ ++ }; \ ++ static const struct attribute_group _attrname##_attr_group = { \ ++ .name = _fsname, .attrs = _attrname##_attrs \ ++ } + -+struct ntsync_q { -+ struct task_struct *task; -+ __u32 owner; ++/* ++ * ROG PPT attributes need a little different in setup as they ++ * require rog_tunables members. ++ */ + -+ /* -+ * Protected via atomic_try_cmpxchg(). Only the thread that wins the -+ * compare-and-swap may actually change object states and wake this -+ * task. 
-+ */ -+ atomic_t signaled; ++#define __ROG_TUNABLE_RW(_attr, _min, _max, _wmi) \ ++ static ssize_t _attr##_current_value_store( \ ++ struct kobject *kobj, struct kobj_attribute *attr, \ ++ const char *buf, size_t count) \ ++ { \ ++ return attr_uint_store(kobj, attr, buf, count, \ ++ asus_armoury.rog_tunables->_min, \ ++ asus_armoury.rog_tunables->_max, \ ++ &asus_armoury.rog_tunables->_attr, \ ++ _wmi); \ ++ } \ ++ static ssize_t _attr##_current_value_show( \ ++ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ ++ { \ ++ return sysfs_emit(buf, "%u\n", \ ++ asus_armoury.rog_tunables->_attr); \ ++ } \ ++ static struct kobj_attribute attr_##_attr##_current_value = \ ++ __ASUS_ATTR_RW(_attr, current_value) + -+ bool all; -+ bool ownerdead; -+ __u32 count; -+ struct ntsync_q_entry entries[]; - }; - - struct ntsync_device { -+ /* -+ * Wait-all operations must atomically grab all objects, and be totally -+ * ordered with respect to each other and wait-any operations. -+ * If one thread is trying to acquire several objects, another thread -+ * cannot touch the object at the same time. -+ * -+ * This device-wide lock is used to serialize wait-for-all -+ * operations, and operations on an object that is involved in a -+ * wait-for-all. -+ */ -+ struct mutex wait_all_lock; ++#define __ROG_TUNABLE_SHOW(_prop, _attrname, _val) \ ++ static ssize_t _attrname##_##_prop##_show( \ ++ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ ++ { \ ++ return sysfs_emit(buf, "%d\n", \ ++ asus_armoury.rog_tunables->_val); \ ++ } \ ++ static struct kobj_attribute attr_##_attrname##_##_prop = \ ++ __ASUS_ATTR_RO(_attrname, _prop) ++ ++#define ATTR_GROUP_ROG_TUNABLE(_attrname, _fsname, _wmi, _default, _min, _max, \ ++ _incstep, _dispname) \ ++ __ROG_TUNABLE_SHOW(default_value, _attrname, _default); \ ++ __ROG_TUNABLE_RW(_attrname, _min, _max, _wmi); \ ++ __ROG_TUNABLE_SHOW(min_value, _attrname, _min); \ ++ __ROG_TUNABLE_SHOW(max_value, _attrname, _max); \ ++ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", _incstep); \ ++ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ ++ static struct kobj_attribute attr_##_attrname##_type = \ ++ __ASUS_ATTR_RO_AS(type, int_type_show); \ ++ static struct attribute *_attrname##_attrs[] = { \ ++ &attr_##_attrname##_current_value.attr, \ ++ &attr_##_attrname##_default_value.attr, \ ++ &attr_##_attrname##_min_value.attr, \ ++ &attr_##_attrname##_max_value.attr, \ ++ &attr_##_attrname##_scalar_increment.attr, \ ++ &attr_##_attrname##_display_name.attr, \ ++ &attr_##_attrname##_type.attr, \ ++ NULL \ ++ }; \ ++ static const struct attribute_group _attrname##_attr_group = { \ ++ .name = _fsname, .attrs = _attrname##_attrs \ ++ } + - struct file *file; - }; ++#endif /* _ASUS_BIOSCFG_H_ */ +diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c +index 8bd187e8b47f..6bd89cd0acdf 100644 +--- a/drivers/platform/x86/asus-wmi.c ++++ b/drivers/platform/x86/asus-wmi.c +@@ -55,8 +55,6 @@ module_param(fnlock_default, bool, 0444); + #define to_asus_wmi_driver(pdrv) \ + (container_of((pdrv), struct asus_wmi_driver, platform_driver)) + +-#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" +- + #define NOTIFY_BRNUP_MIN 0x11 + #define NOTIFY_BRNUP_MAX 0x1f + #define NOTIFY_BRNDOWN_MIN 0x20 +@@ -105,8 +103,6 @@ module_param(fnlock_default, bool, 0444); + #define USB_INTEL_XUSB2PR 0xD0 + #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 + +-#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI" +- + #define WMI_EVENT_MASK 0xFFFF + #define 
FAN_CURVE_POINTS 8 +@@ -142,16 +138,20 @@ module_param(fnlock_default, bool, 0444); + #define ASUS_MINI_LED_2024_STRONG 0x01 + #define ASUS_MINI_LED_2024_OFF 0x02 + +-/* Controls the power state of the USB0 hub on ROG Ally which input is on */ + #define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE" +-/* 300ms so far seems to produce a reliable result on AC and battery */ +-#define ASUS_USB0_PWR_EC0_CSEE_WAIT 1500 +/* -+ * Single objects are locked using obj->lock. -+ * -+ * Multiple objects are 'locked' while holding dev->wait_all_lock. -+ * In this case however, individual objects are not locked by holding -+ * obj->lock, but by setting obj->dev_locked. -+ * -+ * This means that in order to lock a single object, the sequence is slightly -+ * more complicated than usual. Specifically it needs to check obj->dev_locked -+ * after acquiring obj->lock, if set, it needs to drop the lock and acquire -+ * dev->wait_all_lock in order to serialize against the multi-object operation. ++ * The period required to wait after screen off/on/s2idle.check in MS. ++ * Time here greatly impacts the wake behaviour. Used in suspend/wake. + */ ++#define ASUS_USB0_PWR_EC0_CSEE_WAIT 600 ++#define ASUS_USB0_PWR_EC0_CSEE_OFF 0xB7 ++#define ASUS_USB0_PWR_EC0_CSEE_ON 0xB8 + + static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; + + static int throttle_thermal_policy_write(struct asus_wmi *); + +-static const struct dmi_system_id asus_ally_mcu_quirk[] = { ++static const struct dmi_system_id asus_rog_ally_device[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "RC71L"), +@@ -274,9 +274,6 @@ struct asus_wmi { + u32 tablet_switch_dev_id; + bool tablet_switch_inverted; + +- /* The ROG Ally device requires the MCU USB device be disconnected before suspend */ +- bool ally_mcu_usb_switch; +- + enum fan_type fan_type; + enum fan_type gpu_fan_type; + enum fan_type mid_fan_type; +@@ -289,11 +286,12 @@ struct asus_wmi { + u8 fan_boost_mode_mask; + u8 fan_boost_mode; + + -+static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) -+{ -+ lockdep_assert_held(&dev->wait_all_lock); -+ lockdep_assert(obj->dev == dev); -+ spin_lock(&obj->lock); -+ /* -+ * By setting obj->dev_locked inside obj->lock, it is ensured that -+ * anyone holding obj->lock must see the value. -+ */ -+ obj->dev_locked = 1; -+ spin_unlock(&obj->lock); -+} -+ -+static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) -+{ -+ lockdep_assert_held(&dev->wait_all_lock); -+ lockdep_assert(obj->dev == dev); -+ spin_lock(&obj->lock); -+ obj->dev_locked = 0; -+ spin_unlock(&obj->lock); -+} -+ -+static void obj_lock(struct ntsync_obj *obj) -+{ -+ struct ntsync_device *dev = obj->dev; -+ -+ for (;;) { -+ spin_lock(&obj->lock); -+ if (likely(!obj->dev_locked)) -+ break; -+ -+ spin_unlock(&obj->lock); -+ mutex_lock(&dev->wait_all_lock); -+ spin_lock(&obj->lock); -+ /* -+ * obj->dev_locked should be set and released under the same -+ * wait_all_lock section, since we now own this lock, it should -+ * be clear. 
-+ */ -+ lockdep_assert(!obj->dev_locked); -+ spin_unlock(&obj->lock); -+ mutex_unlock(&dev->wait_all_lock); -+ } -+} ++ /* Tunables provided by ASUS for gaming laptops */ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + bool egpu_enable_available; + bool dgpu_disable_available; + u32 gpu_mux_dev; +- +- /* Tunables provided by ASUS for gaming laptops */ + u32 ppt_pl2_sppt; + u32 ppt_pl1_spl; + u32 ppt_apu_sppt; +@@ -301,6 +299,9 @@ struct asus_wmi { + u32 ppt_fppt; + u32 nv_dynamic_boost; + u32 nv_temp_target; ++ bool panel_overdrive_available; ++ u32 mini_led_dev_id; ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + u32 kbd_rgb_dev; + bool kbd_rgb_state_available; +@@ -319,9 +320,6 @@ struct asus_wmi { + // The RSOC controls the maximum charging percentage. + bool battery_rsoc_available; + +- bool panel_overdrive_available; +- u32 mini_led_dev_id; +- + struct hotplug_slot hotplug_slot; + struct mutex hotplug_lock; + struct mutex wmi_lock; +@@ -335,6 +333,17 @@ struct asus_wmi { + struct asus_wmi_driver *driver; + }; + ++static bool ally_mcu_usb_plug; + -+static void obj_unlock(struct ntsync_obj *obj) ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) ++static void asus_wmi_show_deprecated(void) +{ -+ spin_unlock(&obj->lock); ++ pr_notice_once("Accessing attributes through /sys/bus/platform/asus_wmi " ++ "is deprecated and will be removed in a future release. Please " ++ "switch over to /sys/class/firmware_attributes.\n"); +} ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + -+static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj) + /* WMI ************************************************************************/ + + static int asus_wmi_evaluate_method3(u32 method_id, +@@ -385,7 +394,7 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) + { + return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval); + } +-EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method); ++EXPORT_SYMBOL_NS_GPL(asus_wmi_evaluate_method, "ASUS_WMI"); + + static int asus_wmi_evaluate_method5(u32 method_id, + u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval) +@@ -549,12 +558,50 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) + return 0; + } + +-static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, +- u32 *retval) ++/** ++ * asus_wmi_get_devstate_dsts() - Get the WMI function state. ++ * @dev_id: The WMI method ID to call. ++ * @retval: A pointer to where to store the value returned from WMI. ++ * ++ * On success the return value is 0, and the retval is a valid value returned ++ * by the successful WMI function call otherwise an error is returned if the ++ * call failed, or if the WMI method ID is unsupported. 
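++ *
++ * Concretely: 0 is returned with *retval holding the reported state on
++ * success; a negative errno is returned if the WMI call itself fails; and
++ * -ENODEV is returned when firmware answers ASUS_WMI_UNSUPPORTED_METHOD.
++ *
++ * Illustrative caller (a sketch only, not taken from the driver; the symbol
++ * is exported in the "ASUS_WMI" namespace, so an external module would also
++ * need MODULE_IMPORT_NS("ASUS_WMI")):
++ *
++ *	u32 val;
++ *	int err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_DGPU, &val);
++ *	if (!err)
++ *		pr_info("dGPU devstate: 0x%x\n", val);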
++ */ ++int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval) +{ -+ bool all; ++ int err; + -+ obj_lock(obj); -+ all = atomic_read(&obj->all_hint); -+ if (unlikely(all)) { -+ obj_unlock(obj); -+ mutex_lock(&dev->wait_all_lock); -+ dev_lock_obj(dev, obj); -+ } ++ err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, retval); ++ if (err) ++ return err; + -+ return all; -+} ++ if (*retval == ASUS_WMI_UNSUPPORTED_METHOD) ++ return -ENODEV; + -+static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all) -+{ -+ if (all) { -+ dev_unlock_obj(dev, obj); -+ mutex_unlock(&dev->wait_all_lock); -+ } else { -+ obj_unlock(obj); -+ } ++ return 0; +} ++EXPORT_SYMBOL_NS_GPL(asus_wmi_get_devstate_dsts, "ASUS_WMI"); + -+#define ntsync_assert_held(obj) \ -+ lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \ -+ ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \ -+ (obj)->dev_locked)) ++/** ++ * asus_wmi_set_devstate() - Set the WMI function state. ++ * @dev_id: The WMI function to call. ++ * @ctrl_param: The argument to be used for this WMI function. ++ * @retval: A pointer to where to store the value returned from WMI. ++ * ++ * The returned WMI function state if not checked here for error as ++ * asus_wmi_set_devstate() is not called unless first paired with a call to ++ * asus_wmi_get_devstate_dsts() to check that the WMI function is supported. ++ * ++ * On success the return value is 0, and the retval is a valid value returned ++ * by the successful WMI function call. An error value is returned only if the ++ * WMI function failed. ++ */ ++int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval) + { + return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, + ctrl_param, retval); + } ++EXPORT_SYMBOL_NS_GPL(asus_wmi_set_devstate, "ASUS_WMI"); + + /* Helper for special devices with magic return codes */ + static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, +@@ -687,6 +734,7 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus) + } + + /* Charging mode, 1=Barrel, 2=USB ******************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t charge_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -697,12 +745,16 @@ static ssize_t charge_mode_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+static bool is_signaled(struct ntsync_obj *obj, __u32 owner) -+{ -+ ntsync_assert_held(obj); + return sysfs_emit(buf, "%d\n", value & 0xff); + } + + static DEVICE_ATTR_RO(charge_mode); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* dGPU ********************************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t dgpu_disable_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -713,6 +765,8 @@ static ssize_t dgpu_disable_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+ switch (obj->type) { -+ case NTSYNC_TYPE_SEM: -+ return !!obj->u.sem.count; -+ case NTSYNC_TYPE_MUTEX: -+ if (obj->u.mutex.owner && obj->u.mutex.owner != owner) -+ return false; -+ return obj->u.mutex.count < UINT_MAX; -+ case NTSYNC_TYPE_EVENT: -+ return obj->u.event.signaled; -+ } + return sysfs_emit(buf, "%d\n", result); + } + +@@ -766,8 +820,10 @@ static ssize_t dgpu_disable_store(struct device *dev, + return count; + } + static 
DEVICE_ATTR_RW(dgpu_disable); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* eGPU ********************************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t egpu_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -778,6 +834,8 @@ static ssize_t egpu_enable_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+ WARN(1, "bad object type %#x\n", obj->type); -+ return false; -+} + return sysfs_emit(buf, "%d\n", result); + } + +@@ -834,8 +892,10 @@ static ssize_t egpu_enable_store(struct device *dev, + return count; + } + static DEVICE_ATTR_RW(egpu_enable); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Is eGPU connected? *********************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t egpu_connected_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -846,12 +906,16 @@ static ssize_t egpu_connected_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+/* -+ * "locked_obj" is an optional pointer to an object which is already locked and -+ * should not be locked again. This is necessary so that changing an object's -+ * state and waking it can be a single atomic operation. -+ */ -+static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q, -+ struct ntsync_obj *locked_obj) -+{ -+ __u32 count = q->count; -+ bool can_wake = true; -+ int signaled = -1; -+ __u32 i; + return sysfs_emit(buf, "%d\n", result); + } + + static DEVICE_ATTR_RO(egpu_connected); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* gpu mux switch *************************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t gpu_mux_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -862,6 +926,8 @@ static ssize_t gpu_mux_mode_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+ lockdep_assert_held(&dev->wait_all_lock); -+ if (locked_obj) -+ lockdep_assert(locked_obj->dev_locked); + return sysfs_emit(buf, "%d\n", result); + } + +@@ -920,6 +986,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev, + return count; + } + static DEVICE_ATTR_RW(gpu_mux_mode); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* TUF Laptop Keyboard RGB Modes **********************************************/ + static ssize_t kbd_rgb_mode_store(struct device *dev, +@@ -1043,6 +1110,7 @@ static const struct attribute_group *kbd_rgb_mode_groups[] = { + }; + + /* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t ppt_pl2_sppt_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +@@ -1081,6 +1149,8 @@ static ssize_t ppt_pl2_sppt_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ for (i = 0; i < count; i++) { -+ if (q->entries[i].obj != locked_obj) -+ dev_lock_obj(dev, q->entries[i].obj); -+ } + return sysfs_emit(buf, "%u\n", asus->ppt_pl2_sppt); + } + static DEVICE_ATTR_RW(ppt_pl2_sppt); +@@ -1123,6 +1193,8 @@ static ssize_t ppt_pl1_spl_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ for (i = 0; i < count; i++) { -+ if 
(!is_signaled(q->entries[i].obj, q->owner)) { -+ can_wake = false; -+ break; -+ } -+ } + return sysfs_emit(buf, "%u\n", asus->ppt_pl1_spl); + } + static DEVICE_ATTR_RW(ppt_pl1_spl); +@@ -1166,6 +1238,8 @@ static ssize_t ppt_fppt_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) { -+ for (i = 0; i < count; i++) { -+ struct ntsync_obj *obj = q->entries[i].obj; + return sysfs_emit(buf, "%u\n", asus->ppt_fppt); + } + static DEVICE_ATTR_RW(ppt_fppt); +@@ -1209,6 +1283,8 @@ static ssize_t ppt_apu_sppt_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ switch (obj->type) { -+ case NTSYNC_TYPE_SEM: -+ obj->u.sem.count--; -+ break; -+ case NTSYNC_TYPE_MUTEX: -+ if (obj->u.mutex.ownerdead) -+ q->ownerdead = true; -+ obj->u.mutex.ownerdead = false; -+ obj->u.mutex.count++; -+ obj->u.mutex.owner = q->owner; -+ break; -+ case NTSYNC_TYPE_EVENT: -+ if (!obj->u.event.manual) -+ obj->u.event.signaled = false; -+ break; -+ } -+ } -+ wake_up_process(q->task); -+ } + return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt); + } + static DEVICE_ATTR_RW(ppt_apu_sppt); +@@ -1252,6 +1328,8 @@ static ssize_t ppt_platform_sppt_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ for (i = 0; i < count; i++) { -+ if (q->entries[i].obj != locked_obj) -+ dev_unlock_obj(dev, q->entries[i].obj); -+ } -+} + return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt); + } + static DEVICE_ATTR_RW(ppt_platform_sppt); +@@ -1295,6 +1373,8 @@ static ssize_t nv_dynamic_boost_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj) -+{ -+ struct ntsync_q_entry *entry; + return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost); + } + static DEVICE_ATTR_RW(nv_dynamic_boost); +@@ -1338,11 +1418,15 @@ static ssize_t nv_temp_target_show(struct device *dev, + { + struct asus_wmi *asus = dev_get_drvdata(dev); + ++ asus_wmi_show_deprecated(); + -+ lockdep_assert_held(&dev->wait_all_lock); -+ lockdep_assert(obj->dev_locked); + return sysfs_emit(buf, "%u\n", asus->nv_temp_target); + } + static DEVICE_ATTR_RW(nv_temp_target); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Ally MCU Powersave ********************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t mcu_powersave_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -1353,6 +1437,8 @@ static ssize_t mcu_powersave_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+ list_for_each_entry(entry, &obj->all_waiters, node) -+ try_wake_all(dev, entry->q, obj); -+} + return sysfs_emit(buf, "%d\n", result); + } + +@@ -1388,6 +1474,7 @@ static ssize_t mcu_powersave_store(struct device *dev, + return count; + } + static DEVICE_ATTR_RW(mcu_powersave); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Battery ********************************************************************/ + +@@ -2261,6 +2348,7 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus) + } + + /* Panel Overdrive ************************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t panel_od_show(struct device *dev, + struct 
device_attribute *attr, char *buf) + { +@@ -2271,6 +2359,8 @@ static ssize_t panel_od_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+static void try_wake_any_sem(struct ntsync_obj *sem) -+{ -+ struct ntsync_q_entry *entry; + return sysfs_emit(buf, "%d\n", result); + } + +@@ -2307,9 +2397,10 @@ static ssize_t panel_od_store(struct device *dev, + return count; + } + static DEVICE_ATTR_RW(panel_od); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Bootup sound ***************************************************************/ +- ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t boot_sound_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -2320,6 +2411,8 @@ static ssize_t boot_sound_show(struct device *dev, + if (result < 0) + return result; + ++ asus_wmi_show_deprecated(); + -+ ntsync_assert_held(sem); -+ lockdep_assert(sem->type == NTSYNC_TYPE_SEM); + return sysfs_emit(buf, "%d\n", result); + } + +@@ -2355,8 +2448,10 @@ static ssize_t boot_sound_store(struct device *dev, + return count; + } + static DEVICE_ATTR_RW(boot_sound); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Mini-LED mode **************************************************************/ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t mini_led_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -2387,6 +2482,8 @@ static ssize_t mini_led_mode_show(struct device *dev, + } + } + ++ asus_wmi_show_deprecated(); + -+ list_for_each_entry(entry, &sem->any_waiters, node) { -+ struct ntsync_q *q = entry->q; -+ int signaled = -1; + return sysfs_emit(buf, "%d\n", value); + } + +@@ -2457,10 +2554,13 @@ static ssize_t available_mini_led_mode_show(struct device *dev, + return sysfs_emit(buf, "0 1 2\n"); + } + ++ asus_wmi_show_deprecated(); + -+ if (!sem->u.sem.count) -+ break; + return sysfs_emit(buf, "0\n"); + } + + static DEVICE_ATTR_RO(available_mini_led_mode); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Quirks *********************************************************************/ + +@@ -3748,6 +3848,7 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus) + return throttle_thermal_policy_write(asus); + } + ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + static ssize_t throttle_thermal_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) + { +@@ -3791,6 +3892,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev, + * Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent + */ + static DEVICE_ATTR_RW(throttle_thermal_policy); ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + /* Platform profile ***********************************************************/ + static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof, +@@ -4388,27 +4490,29 @@ static struct attribute *platform_attributes[] = { + &dev_attr_camera.attr, + &dev_attr_cardr.attr, + &dev_attr_touchpad.attr, +- &dev_attr_charge_mode.attr, +- &dev_attr_egpu_enable.attr, +- &dev_attr_egpu_connected.attr, +- &dev_attr_dgpu_disable.attr, +- &dev_attr_gpu_mux_mode.attr, + &dev_attr_lid_resume.attr, + &dev_attr_als_enable.attr, + &dev_attr_fan_boost_mode.attr, +- &dev_attr_throttle_thermal_policy.attr, +- &dev_attr_ppt_pl2_sppt.attr, +- &dev_attr_ppt_pl1_spl.attr, +- &dev_attr_ppt_fppt.attr, +- &dev_attr_ppt_apu_sppt.attr, +- &dev_attr_ppt_platform_sppt.attr, +- 
&dev_attr_nv_dynamic_boost.attr,
+- &dev_attr_nv_temp_target.attr,
+- &dev_attr_mcu_powersave.attr,
+- &dev_attr_boot_sound.attr,
+- &dev_attr_panel_od.attr,
+- &dev_attr_mini_led_mode.attr,
+- &dev_attr_available_mini_led_mode.attr,
++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
++ &dev_attr_charge_mode.attr,
++ &dev_attr_egpu_enable.attr,
++ &dev_attr_egpu_connected.attr,
++ &dev_attr_dgpu_disable.attr,
++ &dev_attr_gpu_mux_mode.attr,
++ &dev_attr_ppt_pl2_sppt.attr,
++ &dev_attr_ppt_pl1_spl.attr,
++ &dev_attr_ppt_fppt.attr,
++ &dev_attr_ppt_apu_sppt.attr,
++ &dev_attr_ppt_platform_sppt.attr,
++ &dev_attr_nv_dynamic_boost.attr,
++ &dev_attr_nv_temp_target.attr,
++ &dev_attr_mcu_powersave.attr,
++ &dev_attr_boot_sound.attr,
++ &dev_attr_panel_od.attr,
++ &dev_attr_mini_led_mode.attr,
++ &dev_attr_available_mini_led_mode.attr,
++ &dev_attr_throttle_thermal_policy.attr,
++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
+ NULL
+ };
+
+@@ -4430,7 +4534,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
+- else if (attr == &dev_attr_charge_mode.attr)
++ else if (attr == &dev_attr_fan_boost_mode.attr)
++ ok = asus->fan_boost_mode_available;
+
-+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
-+ sem->u.sem.count--;
-+ wake_up_process(q->task);
-+ }
++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
++ if (attr == &dev_attr_charge_mode.attr)
+ devid = ASUS_WMI_DEVID_CHARGE_MODE;
+ else if (attr == &dev_attr_egpu_enable.attr)
+ ok = asus->egpu_enable_available;
+@@ -4468,6 +4576,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
+ ok = asus->mini_led_dev_id != 0;
+ else if (attr == &dev_attr_available_mini_led_mode.attr)
+ ok = asus->mini_led_dev_id != 0;
++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
+
+ if (devid != -1) {
+ ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
+@@ -4707,7 +4816,19 @@ static int asus_wmi_add(struct platform_device *pdev)
+ if (err)
+ goto fail_platform;
+
++ ally_mcu_usb_plug = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE)
++ && dmi_check_system(asus_rog_ally_device);
++ if (ally_mcu_usb_plug) {
++ /*
++ * These steps ensure the device is in a known good state; this is
++ * especially important for the Ally 1 after a reboot. 
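++ * (ASUS_USB0_PWR_EC0_CSEE_ON is the 0xB8 "MCU USB on" command and
++ * ASUS_USB0_PWR_EC0_CSEE_WAIT the 600 ms settling period defined earlier
++ * in this file.)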
++ */ ++ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_ON); ++ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); + } -+} -+ -+static void try_wake_any_mutex(struct ntsync_obj *mutex) -+{ -+ struct ntsync_q_entry *entry; -+ -+ ntsync_assert_held(mutex); -+ lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX); -+ -+ list_for_each_entry(entry, &mutex->any_waiters, node) { -+ struct ntsync_q *q = entry->q; -+ int signaled = -1; + -+ if (mutex->u.mutex.count == UINT_MAX) -+ break; -+ if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner) -+ continue; + /* ensure defaults for tunables */ ++#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + asus->ppt_pl2_sppt = 5; + asus->ppt_pl1_spl = 5; + asus->ppt_apu_sppt = 5; +@@ -4719,8 +4840,6 @@ static int asus_wmi_add(struct platform_device *pdev) + asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); + asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); + asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); +- asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) +- && dmi_check_system(asus_ally_mcu_quirk); + + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE)) + asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE; +@@ -4731,23 +4850,24 @@ static int asus_wmi_add(struct platform_device *pdev) + asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) + asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO; +- +- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) +- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; +- else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) +- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; ++#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY)) + asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO)) + asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; + ++ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) ++ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; ++ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) ++ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; + -+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { -+ if (mutex->u.mutex.ownerdead) -+ q->ownerdead = true; -+ mutex->u.mutex.ownerdead = false; -+ mutex->u.mutex.count++; -+ mutex->u.mutex.owner = q->owner; -+ wake_up_process(q->task); -+ } + err = fan_boost_mode_check_present(asus); + if (err) + goto fail_fan_boost_mode; + + err = platform_profile_setup(asus); +- if (err) ++ if (err && err != -EEXIST) + goto fail_platform_profile_setup; + + err = asus_wmi_sysfs_init(asus->platform_device); +@@ -4911,34 +5031,6 @@ static int asus_hotk_resume(struct device *device) + return 0; + } + +-static int asus_hotk_resume_early(struct device *device) +-{ +- struct asus_wmi *asus = dev_get_drvdata(device); +- +- if (asus->ally_mcu_usb_switch) { +- /* sleep required to prevent USB0 being yanked then reappearing rapidly */ +- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8))) +- dev_err(device, "ROG Ally MCU failed to connect USB dev\n"); +- else +- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); +- } +- return 0; +-} +- +-static int 
asus_hotk_prepare(struct device *device) +-{ +- struct asus_wmi *asus = dev_get_drvdata(device); +- +- if (asus->ally_mcu_usb_switch) { +- /* sleep required to ensure USB0 is disabled before sleep continues */ +- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7))) +- dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n"); +- else +- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); +- } +- return 0; +-} +- + static int asus_hotk_restore(struct device *device) + { + struct asus_wmi *asus = dev_get_drvdata(device); +@@ -4979,11 +5071,32 @@ static int asus_hotk_restore(struct device *device) + return 0; + } + ++static void asus_ally_s2idle_restore(void) ++{ ++ if (ally_mcu_usb_plug) { ++ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_ON); ++ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); + } +} + -+static void try_wake_any_event(struct ntsync_obj *event) ++static int asus_hotk_prepare(struct device *device) +{ -+ struct ntsync_q_entry *entry; -+ -+ ntsync_assert_held(event); -+ lockdep_assert(event->type == NTSYNC_TYPE_EVENT); -+ -+ list_for_each_entry(entry, &event->any_waiters, node) { -+ struct ntsync_q *q = entry->q; -+ int signaled = -1; -+ -+ if (!event->u.event.signaled) -+ break; -+ -+ if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) { -+ if (!event->u.event.manual) -+ event->u.event.signaled = false; -+ wake_up_process(q->task); -+ } ++ if (ally_mcu_usb_plug) { ++ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_OFF); ++ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); + } ++ return 0; +} + - /* - * Actually change the semaphore state, returning -EOVERFLOW if it is made - * invalid. - */ --static int post_sem_state(struct ntsync_obj *sem, __u32 count) -+static int release_sem_state(struct ntsync_obj *sem, __u32 count) - { - __u32 sum; ++/* Use only for Ally devices due to the wake_on_ac */ ++static struct acpi_s2idle_dev_ops asus_ally_s2idle_dev_ops = { ++ .restore = asus_ally_s2idle_restore, ++}; ++ + static const struct dev_pm_ops asus_pm_ops = { + .thaw = asus_hotk_thaw, + .restore = asus_hotk_restore, + .resume = asus_hotk_resume, +- .resume_early = asus_hotk_resume_early, + .prepare = asus_hotk_prepare, + }; -- lockdep_assert_held(&sem->lock); -+ ntsync_assert_held(sem); +@@ -5011,6 +5124,10 @@ static int asus_wmi_probe(struct platform_device *pdev) + return ret; + } - if (check_add_overflow(sem->u.sem.count, count, &sum) || - sum > sem->u.sem.max) -@@ -71,11 +404,13 @@ static int post_sem_state(struct ntsync_obj *sem, __u32 count) - return 0; ++ ret = acpi_register_lps0_dev(&asus_ally_s2idle_dev_ops); ++ if (ret) ++ pr_warn("failed to register LPS0 sleep handler in asus-wmi\n"); ++ + return asus_wmi_add(pdev); } --static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) -+static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp) - { -+ struct ntsync_device *dev = sem->dev; - __u32 __user *user_args = argp; - __u32 prev_count; - __u32 args; -+ bool all; - int ret; - - if (copy_from_user(&args, argp, sizeof(args))) -@@ -84,12 +419,17 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) - if (sem->type != NTSYNC_TYPE_SEM) - return -EINVAL; +@@ -5043,6 +5160,7 @@ EXPORT_SYMBOL_GPL(asus_wmi_register_driver); -- spin_lock(&sem->lock); -+ all = ntsync_lock_obj(dev, sem); + void asus_wmi_unregister_driver(struct asus_wmi_driver *driver) + { ++ acpi_unregister_lps0_dev(&asus_ally_s2idle_dev_ops); + platform_device_unregister(driver->platform_device); + 
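/*
+ * The resulting Ally suspend/resume sequence, summarised: asus_hotk_prepare()
+ * sends ASUS_USB0_PWR_EC0_CSEE_OFF (0xB7) and waits
+ * ASUS_USB0_PWR_EC0_CSEE_WAIT (600 ms) before sleep; on s2idle exit the LPS0
+ * hook asus_ally_s2idle_restore() sends ASUS_USB0_PWR_EC0_CSEE_ON (0xB8) and
+ * waits the same period. This pairing replaces the removed .resume_early and
+ * .prepare callbacks that issued the raw 0xB7/0xB8 values.
+ */
+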
platform_driver_unregister(&driver->platform_driver); + used = false; +diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig +index 37c24ffea65c..bd52d1e081b7 100644 +--- a/drivers/scsi/Kconfig ++++ b/drivers/scsi/Kconfig +@@ -1522,4 +1522,6 @@ endif # SCSI_LOWLEVEL - prev_count = sem->u.sem.count; -- ret = post_sem_state(sem, args); -+ ret = release_sem_state(sem, args); -+ if (!ret) { -+ if (all) -+ try_wake_all_obj(dev, sem); -+ try_wake_any_sem(sem); -+ } + source "drivers/scsi/device_handler/Kconfig" -- spin_unlock(&sem->lock); -+ ntsync_unlock_obj(dev, sem, all); ++source "drivers/scsi/vhba/Kconfig" ++ + endmenu +diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile +index 1313ddf2fd1a..5942e8f79159 100644 +--- a/drivers/scsi/Makefile ++++ b/drivers/scsi/Makefile +@@ -153,6 +153,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o + obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o - if (!ret && put_user(prev_count, user_args)) - ret = -EFAULT; -@@ -97,6 +437,220 @@ static int ntsync_sem_post(struct ntsync_obj *sem, void __user *argp) - return ret; - } + obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ ++obj-$(CONFIG_VHBA) += vhba/ + # This goes last, so that "real" scsi devices probe earlier + obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o +diff --git a/drivers/scsi/vhba/Kconfig b/drivers/scsi/vhba/Kconfig +new file mode 100644 +index 000000000000..e70a381fe3df +--- /dev/null ++++ b/drivers/scsi/vhba/Kconfig +@@ -0,0 +1,9 @@ ++config VHBA ++ tristate "Virtual (SCSI) Host Bus Adapter" ++ depends on SCSI ++ help ++ This is the in-kernel part of CDEmu, a CD/DVD-ROM device ++ emulator. ++ ++ This driver can also be built as a module. If so, the module ++ will be called vhba. +diff --git a/drivers/scsi/vhba/Makefile b/drivers/scsi/vhba/Makefile +new file mode 100644 +index 000000000000..2d7524b66199 +--- /dev/null ++++ b/drivers/scsi/vhba/Makefile +@@ -0,0 +1,4 @@ ++VHBA_VERSION := 20240917 ++ ++obj-$(CONFIG_VHBA) += vhba.o ++ccflags-y := -DVHBA_VERSION=\"$(VHBA_VERSION)\" -Werror +diff --git a/drivers/scsi/vhba/vhba.c b/drivers/scsi/vhba/vhba.c +new file mode 100644 +index 000000000000..7531223355e5 +--- /dev/null ++++ b/drivers/scsi/vhba/vhba.c +@@ -0,0 +1,1130 @@ +/* -+ * Actually change the mutex state, returning -EPERM if not the owner. ++ * vhba.c ++ * ++ * Copyright (C) 2007-2012 Chia-I Wu ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ -+static int unlock_mutex_state(struct ntsync_obj *mutex, -+ const struct ntsync_mutex_args *args) -+{ -+ ntsync_assert_held(mutex); + -+ if (mutex->u.mutex.owner != args->owner) -+ return -EPERM; ++#define pr_fmt(fmt) "vhba: " fmt + -+ if (!--mutex->u.mutex.count) -+ mutex->u.mutex.owner = 0; -+ return 0; -+} ++#include + -+static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp) -+{ -+ struct ntsync_mutex_args __user *user_args = argp; -+ struct ntsync_device *dev = mutex->dev; -+ struct ntsync_mutex_args args; -+ __u32 prev_count; -+ bool all; -+ int ret; ++#include ++#include ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) ++#include ++#else ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_COMPAT ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include + -+ if (copy_from_user(&args, argp, sizeof(args))) -+ return -EFAULT; -+ if (!args.owner) -+ return -EINVAL; + -+ if (mutex->type != NTSYNC_TYPE_MUTEX) -+ return -EINVAL; ++MODULE_AUTHOR("Chia-I Wu"); ++MODULE_VERSION(VHBA_VERSION); ++MODULE_DESCRIPTION("Virtual SCSI HBA"); ++MODULE_LICENSE("GPL"); + -+ all = ntsync_lock_obj(dev, mutex); + -+ prev_count = mutex->u.mutex.count; -+ ret = unlock_mutex_state(mutex, &args); -+ if (!ret) { -+ if (all) -+ try_wake_all_obj(dev, mutex); -+ try_wake_any_mutex(mutex); -+ } ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) ++#define sdev_dbg(sdev, fmt, a...) \ ++ dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) ++#define scmd_dbg(scmd, fmt, a...) \ ++ dev_dbg(&(scmd)->device->sdev_gendev, fmt, ##a) ++#endif + -+ ntsync_unlock_obj(dev, mutex, all); ++#define VHBA_MAX_SECTORS_PER_IO 256 ++#define VHBA_MAX_BUS 16 ++#define VHBA_MAX_ID 16 ++#define VHBA_MAX_DEVICES (VHBA_MAX_BUS * (VHBA_MAX_ID-1)) ++#define VHBA_KBUF_SIZE PAGE_SIZE + -+ if (!ret && put_user(prev_count, &user_args->count)) -+ ret = -EFAULT; ++#define DATA_TO_DEVICE(dir) ((dir) == DMA_TO_DEVICE || (dir) == DMA_BIDIRECTIONAL) ++#define DATA_FROM_DEVICE(dir) ((dir) == DMA_FROM_DEVICE || (dir) == DMA_BIDIRECTIONAL) + -+ return ret; -+} + -+/* -+ * Actually change the mutex state to mark its owner as dead, -+ * returning -EPERM if not the owner. -+ */ -+static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner) -+{ -+ ntsync_assert_held(mutex); ++static int vhba_can_queue = 32; ++module_param_named(can_queue, vhba_can_queue, int, 0); + -+ if (mutex->u.mutex.owner != owner) -+ return -EPERM; + -+ mutex->u.mutex.ownerdead = true; -+ mutex->u.mutex.owner = 0; -+ mutex->u.mutex.count = 0; -+ return 0; -+} ++enum vhba_req_state { ++ VHBA_REQ_FREE, ++ VHBA_REQ_PENDING, ++ VHBA_REQ_READING, ++ VHBA_REQ_SENT, ++ VHBA_REQ_WRITING, ++}; + -+static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp) -+{ -+ struct ntsync_device *dev = mutex->dev; -+ __u32 owner; -+ bool all; -+ int ret; ++struct vhba_command { ++ struct scsi_cmnd *cmd; ++ /* metatags are per-host. 
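(each vhba_command records its request's block-layer tag as the metatag); they are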
not to be confused with ++ queue tags that are usually per-lun */ ++ unsigned long metatag; ++ int status; ++ struct list_head entry; ++}; + -+ if (get_user(owner, (__u32 __user *)argp)) -+ return -EFAULT; -+ if (!owner) -+ return -EINVAL; ++struct vhba_device { ++ unsigned int num; ++ spinlock_t cmd_lock; ++ struct list_head cmd_list; ++ wait_queue_head_t cmd_wq; ++ atomic_t refcnt; + -+ if (mutex->type != NTSYNC_TYPE_MUTEX) -+ return -EINVAL; ++ unsigned char *kbuf; ++ size_t kbuf_size; ++}; + -+ all = ntsync_lock_obj(dev, mutex); ++struct vhba_host { ++ struct Scsi_Host *shost; ++ spinlock_t cmd_lock; ++ int cmd_next; ++ struct vhba_command *commands; ++ spinlock_t dev_lock; ++ struct vhba_device *devices[VHBA_MAX_DEVICES]; ++ int num_devices; ++ DECLARE_BITMAP(chgmap, VHBA_MAX_DEVICES); ++ int chgtype[VHBA_MAX_DEVICES]; ++ struct work_struct scan_devices; ++}; + -+ ret = kill_mutex_state(mutex, owner); -+ if (!ret) { -+ if (all) -+ try_wake_all_obj(dev, mutex); -+ try_wake_any_mutex(mutex); -+ } ++#define MAX_COMMAND_SIZE 16 + -+ ntsync_unlock_obj(dev, mutex, all); ++struct vhba_request { ++ __u32 metatag; ++ __u32 lun; ++ __u8 cdb[MAX_COMMAND_SIZE]; ++ __u8 cdb_len; ++ __u32 data_len; ++}; + -+ return ret; -+} ++struct vhba_response { ++ __u32 metatag; ++ __u32 status; ++ __u32 data_len; ++}; + -+static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse) -+{ -+ struct ntsync_device *dev = event->dev; -+ __u32 prev_state; -+ bool all; + -+ if (event->type != NTSYNC_TYPE_EVENT) -+ return -EINVAL; + -+ all = ntsync_lock_obj(dev, event); ++static struct vhba_command *vhba_alloc_command (void); ++static void vhba_free_command (struct vhba_command *vcmd); + -+ prev_state = event->u.event.signaled; -+ event->u.event.signaled = true; -+ if (all) -+ try_wake_all_obj(dev, event); -+ try_wake_any_event(event); -+ if (pulse) -+ event->u.event.signaled = false; ++static struct platform_device vhba_platform_device; + -+ ntsync_unlock_obj(dev, event, all); + -+ if (put_user(prev_state, (__u32 __user *)argp)) -+ return -EFAULT; + -+ return 0; ++/* These functions define a symmetric 1:1 mapping between device numbers and ++ the bus and id. We have reserved the last id per bus for the host itself. 
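++ For example (with VHBA_MAX_ID = 16, i.e. 15 usable ids per bus): devnum 17
++ maps to bus = 17 / 15 = 1 and id = 17 % 15 = 2, and the inverse
++ bus_and_id_to_devnum(1, 2) = 1 * 15 + 2 = 17, so the mapping round-trips.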
*/ ++static void devnum_to_bus_and_id(unsigned int devnum, unsigned int *bus, unsigned int *id) ++{ ++ *bus = devnum / (VHBA_MAX_ID-1); ++ *id = devnum % (VHBA_MAX_ID-1); +} + -+static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp) ++static unsigned int bus_and_id_to_devnum(unsigned int bus, unsigned int id) +{ -+ struct ntsync_device *dev = event->dev; -+ __u32 prev_state; -+ bool all; -+ -+ if (event->type != NTSYNC_TYPE_EVENT) -+ return -EINVAL; ++ return (bus * (VHBA_MAX_ID-1)) + id; ++} + -+ all = ntsync_lock_obj(dev, event); ++static struct vhba_device *vhba_device_alloc (void) ++{ ++ struct vhba_device *vdev; + -+ prev_state = event->u.event.signaled; -+ event->u.event.signaled = false; ++ vdev = kzalloc(sizeof(struct vhba_device), GFP_KERNEL); ++ if (!vdev) { ++ return NULL; ++ } + -+ ntsync_unlock_obj(dev, event, all); ++ spin_lock_init(&vdev->cmd_lock); ++ INIT_LIST_HEAD(&vdev->cmd_list); ++ init_waitqueue_head(&vdev->cmd_wq); ++ atomic_set(&vdev->refcnt, 1); + -+ if (put_user(prev_state, (__u32 __user *)argp)) -+ return -EFAULT; ++ vdev->kbuf = NULL; ++ vdev->kbuf_size = 0; + -+ return 0; ++ return vdev; +} + -+static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp) ++static void vhba_device_put (struct vhba_device *vdev) +{ -+ struct ntsync_sem_args __user *user_args = argp; -+ struct ntsync_device *dev = sem->dev; -+ struct ntsync_sem_args args; -+ bool all; -+ -+ if (sem->type != NTSYNC_TYPE_SEM) -+ return -EINVAL; -+ -+ all = ntsync_lock_obj(dev, sem); -+ -+ args.count = sem->u.sem.count; -+ args.max = sem->u.sem.max; ++ if (atomic_dec_and_test(&vdev->refcnt)) { ++ kfree(vdev); ++ } ++} + -+ ntsync_unlock_obj(dev, sem, all); ++static struct vhba_device *vhba_device_get (struct vhba_device *vdev) ++{ ++ atomic_inc(&vdev->refcnt); + -+ if (copy_to_user(user_args, &args, sizeof(args))) -+ return -EFAULT; -+ return 0; ++ return vdev; +} + -+static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp) ++static int vhba_device_queue (struct vhba_device *vdev, struct scsi_cmnd *cmd) +{ -+ struct ntsync_mutex_args __user *user_args = argp; -+ struct ntsync_device *dev = mutex->dev; -+ struct ntsync_mutex_args args; -+ bool all; -+ int ret; ++ struct vhba_host *vhost; ++ struct vhba_command *vcmd; ++ unsigned long flags; + -+ if (mutex->type != NTSYNC_TYPE_MUTEX) -+ return -EINVAL; ++ vhost = platform_get_drvdata(&vhba_platform_device); + -+ all = ntsync_lock_obj(dev, mutex); ++ vcmd = vhba_alloc_command(); ++ if (!vcmd) { ++ return SCSI_MLQUEUE_HOST_BUSY; ++ } + -+ args.count = mutex->u.mutex.count; -+ args.owner = mutex->u.mutex.owner; -+ ret = mutex->u.mutex.ownerdead ? 
-EOWNERDEAD : 0; ++ vcmd->cmd = cmd; + -+ ntsync_unlock_obj(dev, mutex, all); ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) ++ vcmd->metatag = scsi_cmd_to_rq(vcmd->cmd)->tag; ++#else ++ vcmd->metatag = vcmd->cmd->request->tag; ++#endif ++ list_add_tail(&vcmd->entry, &vdev->cmd_list); ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ if (copy_to_user(user_args, &args, sizeof(args))) -+ return -EFAULT; -+ return ret; ++ wake_up_interruptible(&vdev->cmd_wq); ++ ++ return 0; +} + -+static int ntsync_event_read(struct ntsync_obj *event, void __user *argp) ++static int vhba_device_dequeue (struct vhba_device *vdev, struct scsi_cmnd *cmd) +{ -+ struct ntsync_event_args __user *user_args = argp; -+ struct ntsync_device *dev = event->dev; -+ struct ntsync_event_args args; -+ bool all; ++ struct vhba_command *vcmd; ++ int retval; ++ unsigned long flags; + -+ if (event->type != NTSYNC_TYPE_EVENT) -+ return -EINVAL; ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { ++ if (vcmd->cmd == cmd) { ++ list_del_init(&vcmd->entry); ++ break; ++ } ++ } + -+ all = ntsync_lock_obj(dev, event); ++ /* command not found */ ++ if (&vcmd->entry == &vdev->cmd_list) { ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ return SUCCESS; ++ } + -+ args.manual = event->u.event.manual; -+ args.signaled = event->u.event.signaled; ++ while (vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING) { ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ scmd_dbg(cmd, "wait for I/O before aborting\n"); ++ schedule_timeout(1); ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ } + -+ ntsync_unlock_obj(dev, event, all); ++ retval = (vcmd->status == VHBA_REQ_SENT) ? FAILED : SUCCESS; + -+ if (copy_to_user(user_args, &args, sizeof(args))) -+ return -EFAULT; -+ return 0; -+} ++ vhba_free_command(vcmd); + - static int ntsync_obj_release(struct inode *inode, struct file *file) - { - struct ntsync_obj *obj = file->private_data; -@@ -114,8 +668,24 @@ static long ntsync_obj_ioctl(struct file *file, unsigned int cmd, - void __user *argp = (void __user *)parm; - - switch (cmd) { -- case NTSYNC_IOC_SEM_POST: -- return ntsync_sem_post(obj, argp); -+ case NTSYNC_IOC_SEM_RELEASE: -+ return ntsync_sem_release(obj, argp); -+ case NTSYNC_IOC_SEM_READ: -+ return ntsync_sem_read(obj, argp); -+ case NTSYNC_IOC_MUTEX_UNLOCK: -+ return ntsync_mutex_unlock(obj, argp); -+ case NTSYNC_IOC_MUTEX_KILL: -+ return ntsync_mutex_kill(obj, argp); -+ case NTSYNC_IOC_MUTEX_READ: -+ return ntsync_mutex_read(obj, argp); -+ case NTSYNC_IOC_EVENT_SET: -+ return ntsync_event_set(obj, argp, false); -+ case NTSYNC_IOC_EVENT_RESET: -+ return ntsync_event_reset(obj, argp); -+ case NTSYNC_IOC_EVENT_PULSE: -+ return ntsync_event_set(obj, argp, true); -+ case NTSYNC_IOC_EVENT_READ: -+ return ntsync_event_read(obj, argp); - default: - return -ENOIOCTLCMD; - } -@@ -140,6 +710,9 @@ static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev, - obj->dev = dev; - get_file(dev->file); - spin_lock_init(&obj->lock); -+ INIT_LIST_HEAD(&obj->any_waiters); -+ INIT_LIST_HEAD(&obj->all_waiters); -+ atomic_set(&obj->all_hint, 0); - - return obj; - } -@@ -165,7 +738,6 @@ static int ntsync_obj_get_fd(struct ntsync_obj *obj) - - static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp) - { -- struct ntsync_sem_args __user *user_args = argp; - struct ntsync_sem_args args; - struct ntsync_obj *sem; - int fd; -@@ -182,12 +754,398 @@ static int 
ntsync_create_sem(struct ntsync_device *dev, void __user *argp) - sem->u.sem.count = args.count; - sem->u.sem.max = args.max; - fd = ntsync_obj_get_fd(sem); -- if (fd < 0) { -+ if (fd < 0) - kfree(sem); -- return fd; ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ return fd; ++ return retval; +} + -+static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp) ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) ++static int vhba_slave_alloc(struct scsi_device *sdev) +{ -+ struct ntsync_mutex_args args; -+ struct ntsync_obj *mutex; -+ int fd; -+ -+ if (copy_from_user(&args, argp, sizeof(args))) -+ return -EFAULT; -+ -+ if (!args.owner != !args.count) -+ return -EINVAL; ++ struct Scsi_Host *shost = sdev->host; + -+ mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX); -+ if (!mutex) -+ return -ENOMEM; -+ mutex->u.mutex.count = args.count; -+ mutex->u.mutex.owner = args.owner; -+ fd = ntsync_obj_get_fd(mutex); -+ if (fd < 0) -+ kfree(mutex); ++ sdev_dbg(sdev, "enabling tagging (queue depth: %i).\n", sdev->queue_depth); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) ++ if (!shost_use_blk_mq(shost) && shost->bqt) { ++#else ++ if (shost->bqt) { ++#endif ++ blk_queue_init_tags(sdev->request_queue, sdev->queue_depth, shost->bqt); ++ } ++ scsi_adjust_queue_depth(sdev, 0, sdev->queue_depth); + -+ return fd; ++ return 0; +} ++#endif + -+static int ntsync_create_event(struct ntsync_device *dev, void __user *argp) ++static void vhba_scan_devices_add (struct vhba_host *vhost, int bus, int id) +{ -+ struct ntsync_event_args args; -+ struct ntsync_obj *event; -+ int fd; -+ -+ if (copy_from_user(&args, argp, sizeof(args))) -+ return -EFAULT; -+ -+ event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT); -+ if (!event) -+ return -ENOMEM; -+ event->u.event.manual = args.manual; -+ event->u.event.signaled = args.signaled; -+ fd = ntsync_obj_get_fd(event); -+ if (fd < 0) -+ kfree(event); ++ struct scsi_device *sdev; + -+ return fd; ++ sdev = scsi_device_lookup(vhost->shost, bus, id, 0); ++ if (!sdev) { ++ scsi_add_device(vhost->shost, bus, id, 0); ++ } else { ++ dev_warn(&vhost->shost->shost_gendev, "tried to add an already-existing device %d:%d:0!\n", bus, id); ++ scsi_device_put(sdev); ++ } +} + -+static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd) ++static void vhba_scan_devices_remove (struct vhba_host *vhost, int bus, int id) +{ -+ struct file *file = fget(fd); -+ struct ntsync_obj *obj; -+ -+ if (!file) -+ return NULL; -+ -+ if (file->f_op != &ntsync_obj_fops) { -+ fput(file); -+ return NULL; - } - -- return put_user(fd, &user_args->sem); -+ obj = file->private_data; -+ if (obj->dev != dev) { -+ fput(file); -+ return NULL; -+ } ++ struct scsi_device *sdev; + -+ return obj; ++ sdev = scsi_device_lookup(vhost->shost, bus, id, 0); ++ if (sdev) { ++ scsi_remove_device(sdev); ++ scsi_device_put(sdev); ++ } else { ++ dev_warn(&vhost->shost->shost_gendev, "tried to remove non-existing device %d:%d:0!\n", bus, id); ++ } +} + -+static void put_obj(struct ntsync_obj *obj) ++static void vhba_scan_devices (struct work_struct *work) +{ -+ fput(obj->file); -+} ++ struct vhba_host *vhost = container_of(work, struct vhba_host, scan_devices); ++ unsigned long flags; ++ int change, exists; ++ unsigned int devnum; ++ unsigned int bus, id; + -+static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args) -+{ -+ ktime_t timeout = ns_to_ktime(args->timeout); -+ clockid_t clock = CLOCK_MONOTONIC; -+ ktime_t *timeout_ptr; -+ int ret = 0; ++ for (;;) { ++ 
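/*
++ * One pending change is claimed per pass: the devnum bit is fetched and
++ * cleared from chgmap while dev_lock is held, and the lock is dropped
++ * before scsi_add_device()/scsi_remove_device() run, as those can sleep.
++ */
++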
spin_lock_irqsave(&vhost->dev_lock, flags); + -+ timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout); ++ devnum = find_first_bit(vhost->chgmap, VHBA_MAX_DEVICES); ++ if (devnum >= VHBA_MAX_DEVICES) { ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ break; ++ } ++ change = vhost->chgtype[devnum]; ++ exists = vhost->devices[devnum] != NULL; + -+ if (args->flags & NTSYNC_WAIT_REALTIME) -+ clock = CLOCK_REALTIME; ++ vhost->chgtype[devnum] = 0; ++ clear_bit(devnum, vhost->chgmap); + -+ do { -+ if (signal_pending(current)) { -+ ret = -ERESTARTSYS; -+ break; -+ } ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); + -+ set_current_state(TASK_INTERRUPTIBLE); -+ if (atomic_read(&q->signaled) != -1) { -+ ret = 0; -+ break; -+ } -+ ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock); -+ } while (ret < 0); -+ __set_current_state(TASK_RUNNING); ++ devnum_to_bus_and_id(devnum, &bus, &id); + -+ return ret; ++ if (change < 0) { ++ dev_dbg(&vhost->shost->shost_gendev, "trying to remove target %d:%d:0\n", bus, id); ++ vhba_scan_devices_remove(vhost, bus, id); ++ } else if (change > 0) { ++ dev_dbg(&vhost->shost->shost_gendev, "trying to add target %d:%d:0\n", bus, id); ++ vhba_scan_devices_add(vhost, bus, id); ++ } else { ++ /* quick sequence of add/remove or remove/add; we determine ++ which one it was by checking if device structure exists */ ++ if (exists) { ++ /* remove followed by add: remove and (re)add */ ++ dev_dbg(&vhost->shost->shost_gendev, "trying to (re)add target %d:%d:0\n", bus, id); ++ vhba_scan_devices_remove(vhost, bus, id); ++ vhba_scan_devices_add(vhost, bus, id); ++ } else { ++ /* add followed by remove: no-op */ ++ dev_dbg(&vhost->shost->shost_gendev, "no-op for target %d:%d:0\n", bus, id); ++ } ++ } ++ } +} + -+/* -+ * Allocate and initialize the ntsync_q structure, but do not queue us yet. 
-+ */ -+static int setup_wait(struct ntsync_device *dev, -+ const struct ntsync_wait_args *args, bool all, -+ struct ntsync_q **ret_q) ++static int vhba_add_device (struct vhba_device *vdev) +{ -+ int fds[NTSYNC_MAX_WAIT_COUNT + 1]; -+ const __u32 count = args->count; -+ struct ntsync_q *q; -+ __u32 total_count; -+ __u32 i, j; ++ struct vhba_host *vhost; ++ unsigned int devnum; ++ unsigned long flags; + -+ if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME)) -+ return -EINVAL; ++ vhost = platform_get_drvdata(&vhba_platform_device); + -+ if (args->count > NTSYNC_MAX_WAIT_COUNT) -+ return -EINVAL; ++ vhba_device_get(vdev); + -+ total_count = count; -+ if (args->alert) -+ total_count++; ++ spin_lock_irqsave(&vhost->dev_lock, flags); ++ if (vhost->num_devices >= VHBA_MAX_DEVICES) { ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ vhba_device_put(vdev); ++ return -EBUSY; ++ } ++ ++ for (devnum = 0; devnum < VHBA_MAX_DEVICES; devnum++) { ++ if (vhost->devices[devnum] == NULL) { ++ vdev->num = devnum; ++ vhost->devices[devnum] = vdev; ++ vhost->num_devices++; ++ set_bit(devnum, vhost->chgmap); ++ vhost->chgtype[devnum]++; ++ break; ++ } ++ } ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); + -+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), -+ array_size(count, sizeof(*fds)))) -+ return -EFAULT; -+ if (args->alert) -+ fds[count] = args->alert; ++ schedule_work(&vhost->scan_devices); + -+ q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL); -+ if (!q) -+ return -ENOMEM; -+ q->task = current; -+ q->owner = args->owner; -+ atomic_set(&q->signaled, -1); -+ q->all = all; -+ q->ownerdead = false; -+ q->count = count; ++ return 0; ++} + -+ for (i = 0; i < total_count; i++) { -+ struct ntsync_q_entry *entry = &q->entries[i]; -+ struct ntsync_obj *obj = get_obj(dev, fds[i]); ++static int vhba_remove_device (struct vhba_device *vdev) ++{ ++ struct vhba_host *vhost; ++ unsigned long flags; + -+ if (!obj) -+ goto err; ++ vhost = platform_get_drvdata(&vhba_platform_device); + -+ if (all) { -+ /* Check that the objects are all distinct. 
*/ -+ for (j = 0; j < i; j++) { -+ if (obj == q->entries[j].obj) { -+ put_obj(obj); -+ goto err; -+ } -+ } -+ } ++ spin_lock_irqsave(&vhost->dev_lock, flags); ++ set_bit(vdev->num, vhost->chgmap); ++ vhost->chgtype[vdev->num]--; ++ vhost->devices[vdev->num] = NULL; ++ vhost->num_devices--; ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); + -+ entry->obj = obj; -+ entry->q = q; -+ entry->index = i; -+ } ++ vhba_device_put(vdev); + -+ *ret_q = q; -+ return 0; ++ schedule_work(&vhost->scan_devices); + -+err: -+ for (j = 0; j < i; j++) -+ put_obj(q->entries[j].obj); -+ kfree(q); -+ return -EINVAL; ++ return 0; +} + -+static void try_wake_any_obj(struct ntsync_obj *obj) ++static struct vhba_device *vhba_lookup_device (int devnum) +{ -+ switch (obj->type) { -+ case NTSYNC_TYPE_SEM: -+ try_wake_any_sem(obj); -+ break; -+ case NTSYNC_TYPE_MUTEX: -+ try_wake_any_mutex(obj); -+ break; -+ case NTSYNC_TYPE_EVENT: -+ try_wake_any_event(obj); -+ break; -+ } ++ struct vhba_host *vhost; ++ struct vhba_device *vdev = NULL; ++ unsigned long flags; ++ ++ vhost = platform_get_drvdata(&vhba_platform_device); ++ ++ if (likely(devnum < VHBA_MAX_DEVICES)) { ++ spin_lock_irqsave(&vhost->dev_lock, flags); ++ vdev = vhost->devices[devnum]; ++ if (vdev) { ++ vdev = vhba_device_get(vdev); ++ } ++ ++ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ } ++ ++ return vdev; +} + -+static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp) ++static struct vhba_command *vhba_alloc_command (void) +{ -+ struct ntsync_wait_args args; -+ __u32 i, total_count; -+ struct ntsync_q *q; -+ int signaled; -+ bool all; -+ int ret; ++ struct vhba_host *vhost; ++ struct vhba_command *vcmd; ++ unsigned long flags; ++ int i; + -+ if (copy_from_user(&args, argp, sizeof(args))) -+ return -EFAULT; ++ vhost = platform_get_drvdata(&vhba_platform_device); + -+ ret = setup_wait(dev, &args, false, &q); -+ if (ret < 0) -+ return ret; ++ spin_lock_irqsave(&vhost->cmd_lock, flags); + -+ total_count = args.count; -+ if (args.alert) -+ total_count++; ++ vcmd = vhost->commands + vhost->cmd_next++; ++ if (vcmd->status != VHBA_REQ_FREE) { ++ for (i = 0; i < vhba_can_queue; i++) { ++ vcmd = vhost->commands + i; + -+ /* queue ourselves */ ++ if (vcmd->status == VHBA_REQ_FREE) { ++ vhost->cmd_next = i + 1; ++ break; ++ } ++ } + -+ for (i = 0; i < total_count; i++) { -+ struct ntsync_q_entry *entry = &q->entries[i]; -+ struct ntsync_obj *obj = entry->obj; ++ if (i == vhba_can_queue) { ++ vcmd = NULL; ++ } ++ } + -+ all = ntsync_lock_obj(dev, obj); -+ list_add_tail(&entry->node, &obj->any_waiters); -+ ntsync_unlock_obj(dev, obj, all); -+ } ++ if (vcmd) { ++ vcmd->status = VHBA_REQ_PENDING; ++ } + -+ /* -+ * Check if we are already signaled. -+ * -+ * Note that the API requires that normal objects are checked before -+ * the alert event. Hence we queue the alert event last, and check -+ * objects in order. 
-+ */ ++ vhost->cmd_next %= vhba_can_queue; + -+ for (i = 0; i < total_count; i++) { -+ struct ntsync_obj *obj = q->entries[i].obj; ++ spin_unlock_irqrestore(&vhost->cmd_lock, flags); + -+ if (atomic_read(&q->signaled) != -1) -+ break; ++ return vcmd; ++} + -+ all = ntsync_lock_obj(dev, obj); -+ try_wake_any_obj(obj); -+ ntsync_unlock_obj(dev, obj, all); -+ } ++static void vhba_free_command (struct vhba_command *vcmd) ++{ ++ struct vhba_host *vhost; ++ unsigned long flags; + -+ /* sleep */ ++ vhost = platform_get_drvdata(&vhba_platform_device); + -+ ret = ntsync_schedule(q, &args); ++ spin_lock_irqsave(&vhost->cmd_lock, flags); ++ vcmd->status = VHBA_REQ_FREE; ++ spin_unlock_irqrestore(&vhost->cmd_lock, flags); ++} + -+ /* and finally, unqueue */ ++static int vhba_queuecommand (struct Scsi_Host *shost, struct scsi_cmnd *cmd) ++{ ++ struct vhba_device *vdev; ++ int retval; ++ unsigned int devnum; + -+ for (i = 0; i < total_count; i++) { -+ struct ntsync_q_entry *entry = &q->entries[i]; -+ struct ntsync_obj *obj = entry->obj; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) ++ scmd_dbg(cmd, "queue %p tag %i\n", cmd, scsi_cmd_to_rq(cmd)->tag); ++#else ++ scmd_dbg(cmd, "queue %p tag %i\n", cmd, cmd->request->tag); ++#endif + -+ all = ntsync_lock_obj(dev, obj); -+ list_del(&entry->node); -+ ntsync_unlock_obj(dev, obj, all); ++ devnum = bus_and_id_to_devnum(cmd->device->channel, cmd->device->id); ++ vdev = vhba_lookup_device(devnum); ++ if (!vdev) { ++ scmd_dbg(cmd, "no such device\n"); + -+ put_obj(obj); -+ } ++ cmd->result = DID_NO_CONNECT << 16; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) ++ scsi_done(cmd); ++#else ++ cmd->scsi_done(cmd); ++#endif + -+ signaled = atomic_read(&q->signaled); -+ if (signaled != -1) { -+ struct ntsync_wait_args __user *user_args = argp; ++ return 0; ++ } + -+ /* even if we caught a signal, we need to communicate success */ -+ ret = q->ownerdead ? -EOWNERDEAD : 0; ++ retval = vhba_device_queue(vdev, cmd); + -+ if (put_user(signaled, &user_args->index)) -+ ret = -EFAULT; -+ } else if (!ret) { -+ ret = -ETIMEDOUT; -+ } ++ vhba_device_put(vdev); + -+ kfree(q); -+ return ret; ++ return retval; +} + -+static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp) ++static int vhba_abort (struct scsi_cmnd *cmd) +{ -+ struct ntsync_wait_args args; -+ struct ntsync_q *q; -+ int signaled; -+ __u32 i; -+ int ret; -+ -+ if (copy_from_user(&args, argp, sizeof(args))) -+ return -EFAULT; -+ -+ ret = setup_wait(dev, &args, true, &q); -+ if (ret < 0) -+ return ret; -+ -+ /* queue ourselves */ ++ struct vhba_device *vdev; ++ int retval = SUCCESS; ++ unsigned int devnum; + -+ mutex_lock(&dev->wait_all_lock); ++ scmd_dbg(cmd, "abort %p\n", cmd); + -+ for (i = 0; i < args.count; i++) { -+ struct ntsync_q_entry *entry = &q->entries[i]; -+ struct ntsync_obj *obj = entry->obj; ++ devnum = bus_and_id_to_devnum(cmd->device->channel, cmd->device->id); ++ vdev = vhba_lookup_device(devnum); ++ if (vdev) { ++ retval = vhba_device_dequeue(vdev, cmd); ++ vhba_device_put(vdev); ++ } else { ++ cmd->result = DID_NO_CONNECT << 16; ++ } + -+ atomic_inc(&obj->all_hint); ++ return retval; ++} + -+ /* -+ * obj->all_waiters is protected by dev->wait_all_lock rather -+ * than obj->lock, so there is no need to acquire obj->lock -+ * here. 
-+ */ -+ list_add_tail(&entry->node, &obj->all_waiters); -+ } -+ if (args.alert) { -+ struct ntsync_q_entry *entry = &q->entries[args.count]; -+ struct ntsync_obj *obj = entry->obj; ++static struct scsi_host_template vhba_template = { ++ .module = THIS_MODULE, ++ .name = "vhba", ++ .proc_name = "vhba", ++ .queuecommand = vhba_queuecommand, ++ .eh_abort_handler = vhba_abort, ++ .this_id = -1, ++ .max_sectors = VHBA_MAX_SECTORS_PER_IO, ++ .sg_tablesize = 256, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) ++ .slave_alloc = vhba_slave_alloc, ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) ++ .tag_alloc_policy = BLK_TAG_ALLOC_RR, ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) ++ .use_blk_tags = 1, ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) ++ .max_segment_size = VHBA_KBUF_SIZE, ++#endif ++}; + -+ dev_lock_obj(dev, obj); -+ list_add_tail(&entry->node, &obj->any_waiters); -+ dev_unlock_obj(dev, obj); -+ } ++static ssize_t do_request (struct vhba_device *vdev, unsigned long metatag, struct scsi_cmnd *cmd, char __user *buf, size_t buf_len) ++{ ++ struct vhba_request vreq; ++ ssize_t ret; + -+ /* check if we are already signaled */ ++ scmd_dbg(cmd, "request %lu (%p), cdb 0x%x, bufflen %d, sg count %d\n", ++ metatag, cmd, cmd->cmnd[0], scsi_bufflen(cmd), scsi_sg_count(cmd)); + -+ try_wake_all(dev, q, NULL); ++ ret = sizeof(vreq); ++ if (DATA_TO_DEVICE(cmd->sc_data_direction)) { ++ ret += scsi_bufflen(cmd); ++ } + -+ mutex_unlock(&dev->wait_all_lock); ++ if (ret > buf_len) { ++ scmd_dbg(cmd, "buffer too small (%zd < %zd) for a request\n", buf_len, ret); ++ return -EIO; ++ } + -+ /* -+ * Check if the alert event is signaled, making sure to do so only -+ * after checking if the other objects are signaled. -+ */ ++ vreq.metatag = metatag; ++ vreq.lun = cmd->device->lun; ++ memcpy(vreq.cdb, cmd->cmnd, MAX_COMMAND_SIZE); ++ vreq.cdb_len = cmd->cmd_len; ++ vreq.data_len = scsi_bufflen(cmd); + -+ if (args.alert) { -+ struct ntsync_obj *obj = q->entries[args.count].obj; ++ if (copy_to_user(buf, &vreq, sizeof(vreq))) { ++ return -EFAULT; ++ } + -+ if (atomic_read(&q->signaled) == -1) { -+ bool all = ntsync_lock_obj(dev, obj); -+ try_wake_any_obj(obj); -+ ntsync_unlock_obj(dev, obj, all); -+ } -+ } ++ if (DATA_TO_DEVICE(cmd->sc_data_direction) && vreq.data_len) { ++ buf += sizeof(vreq); + -+ /* sleep */ ++ if (scsi_sg_count(cmd)) { ++ unsigned char *kaddr, *uaddr; ++ struct scatterlist *sglist = scsi_sglist(cmd); ++ struct scatterlist *sg; ++ int i; + -+ ret = ntsync_schedule(q, &args); ++ uaddr = (unsigned char *) buf; + -+ /* and finally, unqueue */ ++ for_each_sg(sglist, sg, scsi_sg_count(cmd), i) { ++ size_t len = sg->length; + -+ mutex_lock(&dev->wait_all_lock); ++ if (len > vdev->kbuf_size) { ++ scmd_dbg(cmd, "segment size (%zu) exceeds kbuf size (%zu)!", len, vdev->kbuf_size); ++ len = vdev->kbuf_size; ++ } + -+ for (i = 0; i < args.count; i++) { -+ struct ntsync_q_entry *entry = &q->entries[i]; -+ struct ntsync_obj *obj = entry->obj; ++ kaddr = kmap_atomic(sg_page(sg)); ++ memcpy(vdev->kbuf, kaddr + sg->offset, len); ++ kunmap_atomic(kaddr); + -+ /* -+ * obj->all_waiters is protected by dev->wait_all_lock rather -+ * than obj->lock, so there is no need to acquire it here. 
-+ */ -+ list_del(&entry->node); ++ if (copy_to_user(uaddr, vdev->kbuf, len)) { ++ return -EFAULT; ++ } ++ uaddr += len; ++ } ++ } else { ++ if (copy_to_user(buf, scsi_sglist(cmd), vreq.data_len)) { ++ return -EFAULT; ++ } ++ } ++ } + -+ atomic_dec(&obj->all_hint); ++ return ret; ++} + -+ put_obj(obj); -+ } ++static ssize_t do_response (struct vhba_device *vdev, unsigned long metatag, struct scsi_cmnd *cmd, const char __user *buf, size_t buf_len, struct vhba_response *res) ++{ ++ ssize_t ret = 0; + -+ mutex_unlock(&dev->wait_all_lock); ++ scmd_dbg(cmd, "response %lu (%p), status %x, data len %d, sg count %d\n", ++ metatag, cmd, res->status, res->data_len, scsi_sg_count(cmd)); + -+ if (args.alert) { -+ struct ntsync_q_entry *entry = &q->entries[args.count]; -+ struct ntsync_obj *obj = entry->obj; -+ bool all; ++ if (res->status) { ++ if (res->data_len > SCSI_SENSE_BUFFERSIZE) { ++ scmd_dbg(cmd, "truncate sense (%d < %d)", SCSI_SENSE_BUFFERSIZE, res->data_len); ++ res->data_len = SCSI_SENSE_BUFFERSIZE; ++ } + -+ all = ntsync_lock_obj(dev, obj); -+ list_del(&entry->node); -+ ntsync_unlock_obj(dev, obj, all); ++ if (copy_from_user(cmd->sense_buffer, buf, res->data_len)) { ++ return -EFAULT; ++ } + -+ put_obj(obj); -+ } ++ cmd->result = res->status; + -+ signaled = atomic_read(&q->signaled); -+ if (signaled != -1) { -+ struct ntsync_wait_args __user *user_args = argp; ++ ret += res->data_len; ++ } else if (DATA_FROM_DEVICE(cmd->sc_data_direction) && scsi_bufflen(cmd)) { ++ size_t to_read; + -+ /* even if we caught a signal, we need to communicate success */ -+ ret = q->ownerdead ? -EOWNERDEAD : 0; ++ if (res->data_len > scsi_bufflen(cmd)) { ++ scmd_dbg(cmd, "truncate data (%d < %d)\n", scsi_bufflen(cmd), res->data_len); ++ res->data_len = scsi_bufflen(cmd); ++ } + -+ if (put_user(signaled, &user_args->index)) -+ ret = -EFAULT; -+ } else if (!ret) { -+ ret = -ETIMEDOUT; -+ } ++ to_read = res->data_len; + -+ kfree(q); -+ return ret; - } - - static int ntsync_char_open(struct inode *inode, struct file *file) -@@ -198,6 +1156,8 @@ static int ntsync_char_open(struct inode *inode, struct file *file) - if (!dev) - return -ENOMEM; - -+ mutex_init(&dev->wait_all_lock); ++ if (scsi_sg_count(cmd)) { ++ unsigned char *kaddr, *uaddr; ++ struct scatterlist *sglist = scsi_sglist(cmd); ++ struct scatterlist *sg; ++ int i; + - file->private_data = dev; - dev->file = file; - return nonseekable_open(inode, file); -@@ -219,8 +1179,16 @@ static long ntsync_char_ioctl(struct file *file, unsigned int cmd, - void __user *argp = (void __user *)parm; - - switch (cmd) { -+ case NTSYNC_IOC_CREATE_EVENT: -+ return ntsync_create_event(dev, argp); -+ case NTSYNC_IOC_CREATE_MUTEX: -+ return ntsync_create_mutex(dev, argp); - case NTSYNC_IOC_CREATE_SEM: - return ntsync_create_sem(dev, argp); -+ case NTSYNC_IOC_WAIT_ALL: -+ return ntsync_wait_all(dev, argp); -+ case NTSYNC_IOC_WAIT_ANY: -+ return ntsync_wait_any(dev, argp); - default: - return -ENOIOCTLCMD; - } -diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c -index be67382c00f6..2b13cc31d1dd 100644 ---- a/drivers/net/wireless/ath/ath11k/core.c -+++ b/drivers/net/wireless/ath/ath11k/core.c -@@ -724,7 +724,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { - .name = "qca2066 hw2.1", - .hw_rev = ATH11K_HW_QCA2066_HW21, - .fw = { -- .dir = "QCA2066/hw2.1", -+ .dir = "QCA206X/hw2.1", - .board_size = 256 * 1024, - .cal_offset = 128 * 1024, - }, -diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile 
-index 038ccbd9e3ba..de5e4f5145af 100644
---- a/drivers/pci/controller/Makefile
-+++ b/drivers/pci/controller/Makefile
-@@ -1,4 +1,10 @@
- # SPDX-License-Identifier: GPL-2.0
-+ifdef CONFIG_X86_64
-+ifdef CONFIG_SATA_AHCI
-+obj-y += intel-nvme-remap.o
-+endif
-+endif
+
- obj-$(CONFIG_PCIE_CADENCE) += cadence/
- obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
- obj-$(CONFIG_PCI_IXP4XX) += pci-ixp4xx.o
-diff --git a/drivers/pci/controller/intel-nvme-remap.c b/drivers/pci/controller/intel-nvme-remap.c
-new file mode 100644
-index 000000000000..e105e6f5cc91
---- /dev/null
-+++ b/drivers/pci/controller/intel-nvme-remap.c
-@@ -0,0 +1,462 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Intel remapped NVMe device support.
-+ *
-+ * Copyright (c) 2019 Endless Mobile, Inc.
-+ * Author: Daniel Drake <drake@endlessm.com>
-+ *
-+ * Some products ship by default with the SATA controller in "RAID" or
-+ * "Intel RST Premium With Intel Optane System Acceleration" mode. Under this
-+ * mode, which we refer to as "remapped NVMe" mode, any installed NVMe
-+ * devices disappear from the PCI bus, and instead their I/O memory becomes
-+ * available within the AHCI device BARs.
-+ *
-+ * This scheme is understood to be a way of avoiding usage of the standard
-+ * Windows NVMe driver under that OS, instead mandating usage of Intel's
-+ * own driver, which has better power management, and presumably offers
-+ * some RAID/disk-caching solutions too.
-+ *
-+ * Here in this driver, we support the remapped NVMe mode by claiming the
-+ * AHCI device and creating a fake PCIe root port. On the new bus, the
-+ * original AHCI device is exposed with only minor tweaks. Then, fake PCI
-+ * devices corresponding to the remapped NVMe devices are created. The usual
-+ * ahci and nvme drivers are then expected to bind to these devices and
-+ * operate as normal.
-+ *
-+ * The PCI configuration space for the NVMe devices is completely
-+ * unavailable, so we fake a minimal one and hope for the best.
-+ *
-+ * Interrupts are shared between the AHCI and NVMe devices. For simplicity,
-+ * we only support the legacy interrupt here, although MSI support
-+ * could potentially be added later.
-+ */
+
-+#define MODULE_NAME "intel-nvme-remap"
+
-+#include <linux/ahci-remap.h>
-+#include <linux/irq.h>
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/pci.h>
+
-+#define AHCI_PCI_BAR_STANDARD 5
+
-+struct nvme_remap_dev {
-+ struct pci_dev *dev; /* AHCI device */
-+ struct pci_bus *bus; /* our fake PCI bus */
-+ struct pci_sysdata sysdata;
-+ int irq_base; /* our fake interrupts */
+
-+ /*
-+ * When we detect an all-ones write to a BAR register, this flag
-+ * is set, so that we return the BAR size on the next read (a
-+ * standard PCI behaviour).
-+ * This includes the assumption that an all-ones BAR write is
-+ * immediately followed by a read of the same register.
-+ */ -+ bool bar_sizing; ++ to_read -= res->data_len; ++ } + -+ /* -+ * Resources copied from the AHCI device, to be regarded as -+ * resources on our fake bus. -+ */ -+ struct resource ahci_resources[PCI_NUM_RESOURCES]; ++ scsi_set_resid(cmd, to_read); + -+ /* Resources corresponding to the NVMe devices. */ -+ struct resource remapped_dev_mem[AHCI_MAX_REMAP]; ++ ret += res->data_len - to_read; ++ } + -+ /* Number of remapped NVMe devices found. */ -+ int num_remapped_devices; -+}; ++ return ret; ++} + -+static inline struct nvme_remap_dev *nrdev_from_bus(struct pci_bus *bus) ++static struct vhba_command *next_command (struct vhba_device *vdev) +{ -+ return container_of(bus->sysdata, struct nvme_remap_dev, sysdata); -+} ++ struct vhba_command *vcmd; ++ ++ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { ++ if (vcmd->status == VHBA_REQ_PENDING) { ++ break; ++ } ++ } + ++ if (&vcmd->entry == &vdev->cmd_list) { ++ vcmd = NULL; ++ } + -+/******** PCI configuration space **********/ ++ return vcmd; ++} + -+/* -+ * Helper macros for tweaking returned contents of PCI configuration space. -+ * -+ * value contains len bytes of data read from reg. -+ * If fixup_reg is included in that range, fix up the contents of that -+ * register to fixed_value. -+ */ -+#define NR_FIX8(fixup_reg, fixed_value) do { \ -+ if (reg <= fixup_reg && fixup_reg < reg + len) \ -+ ((u8 *) value)[fixup_reg - reg] = (u8) (fixed_value); \ -+ } while (0) ++static struct vhba_command *match_command (struct vhba_device *vdev, __u32 metatag) ++{ ++ struct vhba_command *vcmd; + -+#define NR_FIX16(fixup_reg, fixed_value) do { \ -+ NR_FIX8(fixup_reg, fixed_value); \ -+ NR_FIX8(fixup_reg + 1, fixed_value >> 8); \ -+ } while (0) ++ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { ++ if (vcmd->metatag == metatag) { ++ break; ++ } ++ } + -+#define NR_FIX24(fixup_reg, fixed_value) do { \ -+ NR_FIX8(fixup_reg, fixed_value); \ -+ NR_FIX8(fixup_reg + 1, fixed_value >> 8); \ -+ NR_FIX8(fixup_reg + 2, fixed_value >> 16); \ -+ } while (0) ++ if (&vcmd->entry == &vdev->cmd_list) { ++ vcmd = NULL; ++ } + -+#define NR_FIX32(fixup_reg, fixed_value) do { \ -+ NR_FIX16(fixup_reg, (u16) fixed_value); \ -+ NR_FIX16(fixup_reg + 2, fixed_value >> 16); \ -+ } while (0) ++ return vcmd; ++} + -+/* -+ * Read PCI config space of the slot 0 (AHCI) device. -+ * We pass through the read request to the underlying device, but -+ * tweak the results in some cases. -+ */ -+static int nvme_remap_pci_read_slot0(struct pci_bus *bus, int reg, -+ int len, u32 *value) ++static struct vhba_command *wait_command (struct vhba_device *vdev, unsigned long flags) +{ -+ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); -+ struct pci_bus *ahci_dev_bus = nrdev->dev->bus; -+ int ret; ++ struct vhba_command *vcmd; ++ DEFINE_WAIT(wait); + -+ ret = ahci_dev_bus->ops->read(ahci_dev_bus, nrdev->dev->devfn, -+ reg, len, value); -+ if (ret) -+ return ret; ++ while (!(vcmd = next_command(vdev))) { ++ if (signal_pending(current)) { ++ break; ++ } + -+ /* -+ * Adjust the device class, to prevent this driver from attempting to -+ * additionally probe the device we're simulating here. -+ */ -+ NR_FIX24(PCI_CLASS_PROG, PCI_CLASS_STORAGE_SATA_AHCI); ++ prepare_to_wait(&vdev->cmd_wq, &wait, TASK_INTERRUPTIBLE); + -+ /* -+ * Unset interrupt pin, otherwise ACPI tries to find routing -+ * info for our virtual IRQ, fails, and complains. 
-+ */ -+ NR_FIX8(PCI_INTERRUPT_PIN, 0); ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ /* -+ * Truncate the AHCI BAR to not include the region that covers the -+ * hidden devices. This will cause the ahci driver to successfully -+ * probe th new device (instead of handing it over to this driver). -+ */ -+ if (nrdev->bar_sizing) { -+ NR_FIX32(PCI_BASE_ADDRESS_5, ~(SZ_16K - 1)); -+ nrdev->bar_sizing = false; -+ } ++ schedule(); + -+ return PCIBIOS_SUCCESSFUL; ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ } ++ ++ finish_wait(&vdev->cmd_wq, &wait); ++ if (vcmd) { ++ vcmd->status = VHBA_REQ_READING; ++ } ++ ++ return vcmd; +} + -+/* -+ * Read PCI config space of a remapped device. -+ * Since the original PCI config space is inaccessible, we provide a minimal, -+ * fake config space instead. -+ */ -+static int nvme_remap_pci_read_remapped(struct pci_bus *bus, unsigned int port, -+ int reg, int len, u32 *value) ++static ssize_t vhba_ctl_read (struct file *file, char __user *buf, size_t buf_len, loff_t *offset) +{ -+ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); -+ struct resource *remapped_mem; ++ struct vhba_device *vdev; ++ struct vhba_command *vcmd; ++ ssize_t ret; ++ unsigned long flags; + -+ if (port > nrdev->num_remapped_devices) -+ return PCIBIOS_DEVICE_NOT_FOUND; ++ vdev = file->private_data; + -+ *value = 0; -+ remapped_mem = &nrdev->remapped_dev_mem[port - 1]; ++ /* Get next command */ ++ if (file->f_flags & O_NONBLOCK) { ++ /* Non-blocking variant */ ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ vcmd = next_command(vdev); ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ /* Set a Vendor ID, otherwise Linux assumes no device is present */ -+ NR_FIX16(PCI_VENDOR_ID, PCI_VENDOR_ID_INTEL); ++ if (!vcmd) { ++ return -EWOULDBLOCK; ++ } ++ } else { ++ /* Blocking variant */ ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ vcmd = wait_command(vdev, flags); ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ /* Always appear on & bus mastering */ -+ NR_FIX16(PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); ++ if (!vcmd) { ++ return -ERESTARTSYS; ++ } ++ } + -+ /* Set class so that nvme driver probes us */ -+ NR_FIX24(PCI_CLASS_PROG, PCI_CLASS_STORAGE_EXPRESS); ++ ret = do_request(vdev, vcmd->metatag, vcmd->cmd, buf, buf_len); + -+ if (nrdev->bar_sizing) { -+ NR_FIX32(PCI_BASE_ADDRESS_0, -+ ~(resource_size(remapped_mem) - 1)); -+ nrdev->bar_sizing = false; -+ } else { -+ resource_size_t mem_start = remapped_mem->start; ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ if (ret >= 0) { ++ vcmd->status = VHBA_REQ_SENT; ++ *offset += ret; ++ } else { ++ vcmd->status = VHBA_REQ_PENDING; ++ } + -+ mem_start |= PCI_BASE_ADDRESS_MEM_TYPE_64; -+ NR_FIX32(PCI_BASE_ADDRESS_0, mem_start); -+ mem_start >>= 32; -+ NR_FIX32(PCI_BASE_ADDRESS_1, mem_start); -+ } ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ return PCIBIOS_SUCCESSFUL; ++ return ret; +} + -+/* Read PCI configuration space. 
*/ -+static int nvme_remap_pci_read(struct pci_bus *bus, unsigned int devfn, -+ int reg, int len, u32 *value) ++static ssize_t vhba_ctl_write (struct file *file, const char __user *buf, size_t buf_len, loff_t *offset) +{ -+ if (PCI_SLOT(devfn) == 0) -+ return nvme_remap_pci_read_slot0(bus, reg, len, value); -+ else -+ return nvme_remap_pci_read_remapped(bus, PCI_SLOT(devfn), -+ reg, len, value); -+} ++ struct vhba_device *vdev; ++ struct vhba_command *vcmd; ++ struct vhba_response res; ++ ssize_t ret; ++ unsigned long flags; + -+/* -+ * Write PCI config space of the slot 0 (AHCI) device. -+ * Apart from the special case of BAR sizing, we disable all writes. -+ * Otherwise, the ahci driver could make changes (e.g. unset PCI bus master) -+ * that would affect the operation of the NVMe devices. -+ */ -+static int nvme_remap_pci_write_slot0(struct pci_bus *bus, int reg, -+ int len, u32 value) -+{ -+ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); -+ struct pci_bus *ahci_dev_bus = nrdev->dev->bus; ++ if (buf_len < sizeof(res)) { ++ return -EIO; ++ } + -+ if (reg >= PCI_BASE_ADDRESS_0 && reg <= PCI_BASE_ADDRESS_5) { -+ /* -+ * Writing all-ones to a BAR means that the size of the -+ * memory region is being checked. Flag this so that we can -+ * reply with an appropriate size on the next read. -+ */ -+ if (value == ~0) -+ nrdev->bar_sizing = true; ++ if (copy_from_user(&res, buf, sizeof(res))) { ++ return -EFAULT; ++ } + -+ return ahci_dev_bus->ops->write(ahci_dev_bus, -+ nrdev->dev->devfn, -+ reg, len, value); -+ } ++ vdev = file->private_data; + -+ return PCIBIOS_SET_FAILED; -+} ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ vcmd = match_command(vdev, res.metatag); ++ if (!vcmd || vcmd->status != VHBA_REQ_SENT) { ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ pr_debug("ctl dev #%u not expecting response\n", vdev->num); ++ return -EIO; ++ } ++ vcmd->status = VHBA_REQ_WRITING; ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+/* -+ * Write PCI config space of a remapped device. -+ * Since the original PCI config space is inaccessible, we reject all -+ * writes, except for the special case of BAR probing. -+ */ -+static int nvme_remap_pci_write_remapped(struct pci_bus *bus, -+ unsigned int port, -+ int reg, int len, u32 value) -+{ -+ struct nvme_remap_dev *nrdev = nrdev_from_bus(bus); ++ ret = do_response(vdev, vcmd->metatag, vcmd->cmd, buf + sizeof(res), buf_len - sizeof(res), &res); + -+ if (port > nrdev->num_remapped_devices) -+ return PCIBIOS_DEVICE_NOT_FOUND; ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ if (ret >= 0) { ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) ++ scsi_done(vcmd->cmd); ++#else ++ vcmd->cmd->scsi_done(vcmd->cmd); ++#endif ++ ret += sizeof(res); + -+ /* -+ * Writing all-ones to a BAR means that the size of the memory -+ * region is being checked. Flag this so that we can reply with -+ * an appropriate size on the next read. -+ */ -+ if (value == ~0 && reg >= PCI_BASE_ADDRESS_0 -+ && reg <= PCI_BASE_ADDRESS_5) { -+ nrdev->bar_sizing = true; -+ return PCIBIOS_SUCCESSFUL; -+ } ++ /* don't compete with vhba_device_dequeue */ ++ if (!list_empty(&vcmd->entry)) { ++ list_del_init(&vcmd->entry); ++ vhba_free_command(vcmd); ++ } ++ } else { ++ vcmd->status = VHBA_REQ_SENT; ++ } + -+ return PCIBIOS_SET_FAILED; ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ ++ return ret; +} + -+/* Write PCI configuration space. 
*/ -+static int nvme_remap_pci_write(struct pci_bus *bus, unsigned int devfn, -+ int reg, int len, u32 value) ++static long vhba_ctl_ioctl (struct file *file, unsigned int cmd, unsigned long arg) +{ -+ if (PCI_SLOT(devfn) == 0) -+ return nvme_remap_pci_write_slot0(bus, reg, len, value); -+ else -+ return nvme_remap_pci_write_remapped(bus, PCI_SLOT(devfn), -+ reg, len, value); -+} ++ struct vhba_device *vdev = file->private_data; ++ struct vhba_host *vhost = platform_get_drvdata(&vhba_platform_device); + -+static struct pci_ops nvme_remap_pci_ops = { -+ .read = nvme_remap_pci_read, -+ .write = nvme_remap_pci_write, -+}; ++ switch (cmd) { ++ case 0xBEEF001: { ++ unsigned int ident[4]; /* host, channel, id, lun */ + ++ ident[0] = vhost->shost->host_no; ++ devnum_to_bus_and_id(vdev->num, &ident[1], &ident[2]); ++ ident[3] = 0; /* lun */ + -+/******** Initialization & exit **********/ ++ if (copy_to_user((void *) arg, ident, sizeof(ident))) { ++ return -EFAULT; ++ } + -+/* -+ * Find a PCI domain ID to use for our fake bus. -+ * Start at 0x10000 to not clash with ACPI _SEG domains (16 bits). -+ */ -+static int find_free_domain(void) -+{ -+ int domain = 0xffff; -+ struct pci_bus *bus = NULL; ++ return 0; ++ } ++ case 0xBEEF002: { ++ unsigned int devnum = vdev->num; + -+ while ((bus = pci_find_next_bus(bus)) != NULL) -+ domain = max_t(int, domain, pci_domain_nr(bus)); ++ if (copy_to_user((void *) arg, &devnum, sizeof(devnum))) { ++ return -EFAULT; ++ } + -+ return domain + 1; ++ return 0; ++ } ++ } ++ ++ return -ENOTTY; +} + -+static int find_remapped_devices(struct nvme_remap_dev *nrdev, -+ struct list_head *resources) ++#ifdef CONFIG_COMPAT ++static long vhba_ctl_compat_ioctl (struct file *file, unsigned int cmd, unsigned long arg) +{ -+ void __iomem *mmio; -+ int i, count = 0; -+ u32 cap; ++ unsigned long compat_arg = (unsigned long)compat_ptr(arg); ++ return vhba_ctl_ioctl(file, cmd, compat_arg); ++} ++#endif + -+ mmio = pcim_iomap(nrdev->dev, AHCI_PCI_BAR_STANDARD, -+ pci_resource_len(nrdev->dev, -+ AHCI_PCI_BAR_STANDARD)); -+ if (!mmio) -+ return -ENODEV; ++static unsigned int vhba_ctl_poll (struct file *file, poll_table *wait) ++{ ++ struct vhba_device *vdev = file->private_data; ++ unsigned int mask = 0; ++ unsigned long flags; + -+ /* Check if this device might have remapped nvme devices. 
*/ -+ if (pci_resource_len(nrdev->dev, AHCI_PCI_BAR_STANDARD) < SZ_512K || -+ !(readl(mmio + AHCI_VSCAP) & 1)) -+ return -ENODEV; ++ poll_wait(file, &vdev->cmd_wq, wait); + -+ cap = readq(mmio + AHCI_REMAP_CAP); -+ for (i = AHCI_MAX_REMAP-1; i >= 0; i--) { -+ struct resource *remapped_mem; ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ if (next_command(vdev)) { ++ mask |= POLLIN | POLLRDNORM; ++ } ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ if ((cap & (1 << i)) == 0) -+ continue; -+ if (readl(mmio + ahci_remap_dcc(i)) -+ != PCI_CLASS_STORAGE_EXPRESS) -+ continue; ++ return mask; ++} + -+ /* We've found a remapped device */ -+ remapped_mem = &nrdev->remapped_dev_mem[count++]; -+ remapped_mem->start = -+ pci_resource_start(nrdev->dev, AHCI_PCI_BAR_STANDARD) -+ + ahci_remap_base(i); -+ remapped_mem->end = remapped_mem->start -+ + AHCI_REMAP_N_SIZE - 1; -+ remapped_mem->flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED; -+ pci_add_resource(resources, remapped_mem); -+ } ++static int vhba_ctl_open (struct inode *inode, struct file *file) ++{ ++ struct vhba_device *vdev; ++ int retval; + -+ pcim_iounmap(nrdev->dev, mmio); ++ pr_debug("ctl dev open\n"); + -+ if (count == 0) -+ return -ENODEV; ++ /* check if vhba is probed */ ++ if (!platform_get_drvdata(&vhba_platform_device)) { ++ return -ENODEV; ++ } + -+ nrdev->num_remapped_devices = count; -+ dev_info(&nrdev->dev->dev, "Found %d remapped NVMe devices\n", -+ nrdev->num_remapped_devices); -+ return 0; ++ vdev = vhba_device_alloc(); ++ if (!vdev) { ++ return -ENOMEM; ++ } ++ ++ vdev->kbuf_size = VHBA_KBUF_SIZE; ++ vdev->kbuf = kzalloc(vdev->kbuf_size, GFP_KERNEL); ++ if (!vdev->kbuf) { ++ return -ENOMEM; ++ } ++ ++ if (!(retval = vhba_add_device(vdev))) { ++ file->private_data = vdev; ++ } ++ ++ vhba_device_put(vdev); ++ ++ return retval; +} + -+static void nvme_remap_remove_root_bus(void *data) ++static int vhba_ctl_release (struct inode *inode, struct file *file) +{ -+ struct pci_bus *bus = data; ++ struct vhba_device *vdev; ++ struct vhba_command *vcmd; ++ unsigned long flags; + -+ pci_stop_root_bus(bus); -+ pci_remove_root_bus(bus); ++ vdev = file->private_data; ++ ++ pr_debug("ctl dev release\n"); ++ ++ vhba_device_get(vdev); ++ vhba_remove_device(vdev); ++ ++ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { ++ WARN_ON(vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING); ++ ++ scmd_dbg(vcmd->cmd, "device released with command %lu (%p)\n", vcmd->metatag, vcmd->cmd); ++ vcmd->cmd->result = DID_NO_CONNECT << 16; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) ++ scsi_done(vcmd->cmd); ++#else ++ vcmd->cmd->scsi_done(vcmd->cmd); ++#endif ++ vhba_free_command(vcmd); ++ } ++ INIT_LIST_HEAD(&vdev->cmd_list); ++ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ ++ kfree(vdev->kbuf); ++ vdev->kbuf = NULL; ++ ++ vhba_device_put(vdev); ++ ++ return 0; +} + -+static int nvme_remap_probe(struct pci_dev *dev, -+ const struct pci_device_id *id) -+{ -+ struct nvme_remap_dev *nrdev; -+ LIST_HEAD(resources); -+ int i; -+ int ret; -+ struct pci_dev *child; ++static struct file_operations vhba_ctl_fops = { ++ .owner = THIS_MODULE, ++ .open = vhba_ctl_open, ++ .release = vhba_ctl_release, ++ .read = vhba_ctl_read, ++ .write = vhba_ctl_write, ++ .poll = vhba_ctl_poll, ++ .unlocked_ioctl = vhba_ctl_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = vhba_ctl_compat_ioctl, ++#endif ++}; + -+ nrdev = devm_kzalloc(&dev->dev, sizeof(*nrdev), GFP_KERNEL); -+ nrdev->sysdata.domain = 
find_free_domain(); -+ nrdev->sysdata.nvme_remap_dev = dev; -+ nrdev->dev = dev; -+ pci_set_drvdata(dev, nrdev); ++static struct miscdevice vhba_miscdev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "vhba_ctl", ++ .fops = &vhba_ctl_fops, ++}; + -+ ret = pcim_enable_device(dev); -+ if (ret < 0) -+ return ret; ++static int vhba_probe (struct platform_device *pdev) ++{ ++ struct Scsi_Host *shost; ++ struct vhba_host *vhost; ++ int i; + -+ pci_set_master(dev); ++ vhba_can_queue = clamp(vhba_can_queue, 1, 256); + -+ ret = find_remapped_devices(nrdev, &resources); -+ if (ret) -+ return ret; ++ shost = scsi_host_alloc(&vhba_template, sizeof(struct vhba_host)); ++ if (!shost) { ++ return -ENOMEM; ++ } + -+ /* Add resources from the original AHCI device */ -+ for (i = 0; i < PCI_NUM_RESOURCES; i++) { -+ struct resource *res = &dev->resource[i]; ++ shost->max_channel = VHBA_MAX_BUS-1; ++ shost->max_id = VHBA_MAX_ID; ++ /* we don't support lun > 0 */ ++ shost->max_lun = 1; ++ shost->max_cmd_len = MAX_COMMAND_SIZE; ++ shost->can_queue = vhba_can_queue; ++ shost->cmd_per_lun = vhba_can_queue; + -+ if (res->start) { -+ struct resource *nr_res = &nrdev->ahci_resources[i]; ++ vhost = (struct vhba_host *)shost->hostdata; ++ memset(vhost, 0, sizeof(struct vhba_host)); + -+ nr_res->start = res->start; -+ nr_res->end = res->end; -+ nr_res->flags = res->flags; -+ pci_add_resource(&resources, nr_res); -+ } -+ } ++ vhost->shost = shost; ++ vhost->num_devices = 0; ++ spin_lock_init(&vhost->dev_lock); ++ spin_lock_init(&vhost->cmd_lock); ++ INIT_WORK(&vhost->scan_devices, vhba_scan_devices); ++ vhost->cmd_next = 0; ++ vhost->commands = kzalloc(vhba_can_queue * sizeof(struct vhba_command), GFP_KERNEL); ++ if (!vhost->commands) { ++ return -ENOMEM; ++ } + -+ /* Create virtual interrupts */ -+ nrdev->irq_base = devm_irq_alloc_descs(&dev->dev, -1, 0, -+ nrdev->num_remapped_devices + 1, -+ 0); -+ if (nrdev->irq_base < 0) -+ return nrdev->irq_base; ++ for (i = 0; i < vhba_can_queue; i++) { ++ vhost->commands[i].status = VHBA_REQ_FREE; ++ } + -+ /* Create and populate PCI bus */ -+ nrdev->bus = pci_create_root_bus(&dev->dev, 0, &nvme_remap_pci_ops, -+ &nrdev->sysdata, &resources); -+ if (!nrdev->bus) -+ return -ENODEV; ++ platform_set_drvdata(pdev, vhost); + -+ if (devm_add_action_or_reset(&dev->dev, nvme_remap_remove_root_bus, -+ nrdev->bus)) -+ return -ENOMEM; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) ++ i = scsi_init_shared_tag_map(shost, vhba_can_queue); ++ if (i) return i; ++#endif + -+ /* We don't support sharing MSI interrupts between these devices */ -+ nrdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; ++ if (scsi_add_host(shost, &pdev->dev)) { ++ scsi_host_put(shost); ++ return -ENOMEM; ++ } + -+ pci_scan_child_bus(nrdev->bus); ++ return 0; ++} + -+ list_for_each_entry(child, &nrdev->bus->devices, bus_list) { -+ /* -+ * Prevent PCI core from trying to move memory BARs around. -+ * The hidden NVMe devices are at fixed locations. 
-+ */ -+ for (i = 0; i < PCI_NUM_RESOURCES; i++) { -+ struct resource *res = &child->resource[i]; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0) ++static int vhba_remove (struct platform_device *pdev) ++#else ++static void vhba_remove (struct platform_device *pdev) ++#endif ++{ ++ struct vhba_host *vhost; ++ struct Scsi_Host *shost; + -+ if (res->flags & IORESOURCE_MEM) -+ res->flags |= IORESOURCE_PCI_FIXED; -+ } ++ vhost = platform_get_drvdata(pdev); ++ shost = vhost->shost; + -+ /* Share the legacy IRQ between all devices */ -+ child->irq = dev->irq; -+ } ++ scsi_remove_host(shost); ++ scsi_host_put(shost); + -+ pci_assign_unassigned_bus_resources(nrdev->bus); -+ pci_bus_add_devices(nrdev->bus); ++ kfree(vhost->commands); + -+ return 0; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0) ++ return 0; ++#endif +} + -+static const struct pci_device_id nvme_remap_ids[] = { -+ /* -+ * Match all Intel RAID controllers. -+ * -+ * There's overlap here with the set of devices detected by the ahci -+ * driver, but ahci will only successfully probe when there -+ * *aren't* any remapped NVMe devices, and this driver will only -+ * successfully probe when there *are* remapped NVMe devices that -+ * need handling. -+ */ -+ { -+ PCI_VDEVICE(INTEL, PCI_ANY_ID), -+ .class = PCI_CLASS_STORAGE_RAID << 8, -+ .class_mask = 0xffffff00, -+ }, -+ {0,} -+}; -+MODULE_DEVICE_TABLE(pci, nvme_remap_ids); ++static void vhba_release (struct device * dev) ++{ ++ return; ++} + -+static struct pci_driver nvme_remap_drv = { -+ .name = MODULE_NAME, -+ .id_table = nvme_remap_ids, -+ .probe = nvme_remap_probe, ++static struct platform_device vhba_platform_device = { ++ .name = "vhba", ++ .id = -1, ++ .dev = { ++ .release = vhba_release, ++ }, +}; -+module_pci_driver(nvme_remap_drv); -+ -+MODULE_AUTHOR("Daniel Drake "); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c -index 76f4df75b08a..49c1a91c611d 100644 ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -3746,6 +3746,106 @@ static void quirk_no_bus_reset(struct pci_dev *dev) - dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; - } - -+static bool acs_on_downstream; -+static bool acs_on_multifunction; + -+#define NUM_ACS_IDS 16 -+struct acs_on_id { -+ unsigned short vendor; -+ unsigned short device; ++static struct platform_driver vhba_platform_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "vhba", ++ }, ++ .probe = vhba_probe, ++ .remove = vhba_remove, +}; -+static struct acs_on_id acs_on_ids[NUM_ACS_IDS]; -+static u8 max_acs_id; + -+static __init int pcie_acs_override_setup(char *p) ++static int __init vhba_init (void) +{ -+ if (!p) -+ return -EINVAL; ++ int ret; + -+ while (*p) { -+ if (!strncmp(p, "downstream", 10)) -+ acs_on_downstream = true; -+ if (!strncmp(p, "multifunction", 13)) -+ acs_on_multifunction = true; -+ if (!strncmp(p, "id:", 3)) { -+ char opt[5]; -+ int ret; -+ long val; ++ ret = platform_device_register(&vhba_platform_device); ++ if (ret < 0) { ++ return ret; ++ } + -+ if (max_acs_id >= NUM_ACS_IDS - 1) { -+ pr_warn("Out of PCIe ACS override slots (%d)\n", -+ NUM_ACS_IDS); -+ goto next; -+ } ++ ret = platform_driver_register(&vhba_platform_driver); ++ if (ret < 0) { ++ platform_device_unregister(&vhba_platform_device); ++ return ret; ++ } + -+ p += 3; -+ snprintf(opt, 5, "%s", p); -+ ret = kstrtol(opt, 16, &val); -+ if (ret) { -+ pr_warn("PCIe ACS ID parse error %d\n", ret); -+ goto next; -+ } -+ acs_on_ids[max_acs_id].vendor = val; ++ ret = misc_register(&vhba_miscdev); ++ if (ret < 0) { 
++ platform_driver_unregister(&vhba_platform_driver);
++ platform_device_unregister(&vhba_platform_device);
++ return ret;
++ }
+
++ return 0;
++}
+
++static void __exit vhba_exit(void)
++{
++ misc_deregister(&vhba_miscdev);
++ platform_driver_unregister(&vhba_platform_driver);
++ platform_device_unregister(&vhba_platform_device);
++}
+
++module_init(vhba_init);
++module_exit(vhba_exit);
+
+diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
+index 075e775d3868..e1cc0d60eeb6 100644
+--- a/drivers/staging/Kconfig
++++ b/drivers/staging/Kconfig
+@@ -50,4 +50,6 @@ source "drivers/staging/vme_user/Kconfig"
+ 
+ source "drivers/staging/gpib/Kconfig"
+ 
++source "drivers/staging/apple-bce/Kconfig"
++
+ endif # STAGING
+diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
+index e681e403509c..4045c588b3b4 100644
+--- a/drivers/staging/Makefile
++++ b/drivers/staging/Makefile
+@@ -14,3 +14,4 @@ obj-$(CONFIG_GREYBUS) += greybus/
+ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
+ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
+ obj-$(CONFIG_GPIB) += gpib/
++obj-$(CONFIG_APPLE_BCE) += apple-bce/
+diff --git a/drivers/staging/apple-bce/Kconfig b/drivers/staging/apple-bce/Kconfig
+new file mode 100644
+index 000000000000..fe92bc441e89
+--- /dev/null
++++ b/drivers/staging/apple-bce/Kconfig
+@@ -0,0 +1,18 @@
++config APPLE_BCE
++ tristate "Apple BCE driver (VHCI and Audio support)"
++ default m
++ depends on X86
++ select SOUND
++ select SND
++ select SND_PCM
++ select SND_JACK
++ help
++ VHCI and audio support on Apple MacBooks with the T2 Chip.
++ This driver is divided into three components:
++ - BCE (Buffer Copy Engine): establishes the basic communication
++ channel with the T2 chip. This component is required by the other two:
++ - VHCI (Virtual Host Controller Interface): access to the keyboard, mouse
++ and other system devices depends on this virtual USB host controller
++ - Audio: a driver for the T2 audio interface.
++
++ If "M" is selected, the module will be called apple-bce.
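
For orientation before the patch moves on to apple-bce: the vhba driver wired up above is only half of a virtual SCSI device. Each open() of /dev/vhba_ctl surfaces one new virtual device (see vhba_ctl_open earlier in this hunk), and a userspace daemon then serves its commands with a read()/write() loop against the framing implemented by do_request() and do_response(). The sketch below illustrates that loop; the struct vhba_request/vhba_response layouts are assumptions inferred from those two functions (the authoritative definitions live in the vhba header, which this patch does not carry), so treat it as a minimal illustration rather than the real CDEmu daemon.

    /* Minimal sketch of a /dev/vhba_ctl consumer. Struct layouts are assumed
     * (inferred from do_request()/do_response() above), not authoritative. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    #define MAX_COMMAND_SIZE 16     /* SCSI CDB size used by the driver (assumed) */

    struct vhba_request {           /* assumed layout, mirrors do_request() fields */
        uint32_t metatag;           /* echoed back to match the command */
        uint32_t lun;
        uint8_t  cdb[MAX_COMMAND_SIZE];
        uint32_t cdb_len;
        uint32_t data_len;          /* write-direction payload follows the header */
    };

    struct vhba_response {          /* assumed layout, mirrors do_response() fields */
        uint32_t metatag;
        uint32_t status;            /* SCSI status; non-zero means sense data follows */
        uint32_t data_len;
    };

    int main(void)
    {
        static uint8_t buf[65536];
        int fd = open("/dev/vhba_ctl", O_RDWR);  /* one open() = one virtual device */

        if (fd < 0)
            return 1;

        for (;;) {
            struct vhba_request req;
            struct vhba_response res;

            /* One read() delivers one queued SCSI command (blocks like
             * vhba_ctl_read() above unless O_NONBLOCK is set). */
            ssize_t n = read(fd, buf, sizeof(buf));
            if (n < (ssize_t) sizeof(req))
                break;
            memcpy(&req, buf, sizeof(req));

            /* Complete it: echo the metatag, report GOOD status, no data. */
            res.metatag = req.metatag;
            res.status = 0;
            res.data_len = 0;
            memcpy(buf, &res, sizeof(res));
            if (write(fd, buf, sizeof(res)) < 0)
                break;
        }
        return close(fd);
    }

A real consumer also has to honour req.data_len in both directions — consuming write-payloads after the request header and appending read-payloads after the response — which is exactly the framing do_request()/do_response() implement with the kbuf bounce buffer.
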
+diff --git a/drivers/staging/apple-bce/Makefile b/drivers/staging/apple-bce/Makefile +new file mode 100644 +index 000000000000..8cfbd3f64af6 +--- /dev/null ++++ b/drivers/staging/apple-bce/Makefile +@@ -0,0 +1,28 @@ ++modname := apple-bce ++obj-$(CONFIG_APPLE_BCE) += $(modname).o + -+static int pcie_acs_overrides(struct pci_dev *dev, u16 acs_flags) -+{ -+ int i; ++apple-bce-objs := apple_bce.o mailbox.o queue.o queue_dma.o vhci/vhci.o vhci/queue.o vhci/transfer.o audio/audio.o audio/protocol.o audio/protocol_bce.o audio/pcm.o + -+ /* Never override ACS for legacy devices or devices with ACS caps */ -+ if (!pci_is_pcie(dev) || -+ pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS)) -+ return -ENOTTY; ++MY_CFLAGS += -DWITHOUT_NVME_PATCH ++#MY_CFLAGS += -g -DDEBUG ++ccflags-y += ${MY_CFLAGS} ++CC += ${MY_CFLAGS} + -+ for (i = 0; i < max_acs_id; i++) -+ if (acs_on_ids[i].vendor == dev->vendor && -+ acs_on_ids[i].device == dev->device) -+ return 1; ++KVERSION := $(KERNELRELEASE) ++ifeq ($(origin KERNELRELEASE), undefined) ++KVERSION := $(shell uname -r) ++endif + -+ switch (pci_pcie_type(dev)) { -+ case PCI_EXP_TYPE_DOWNSTREAM: -+ case PCI_EXP_TYPE_ROOT_PORT: -+ if (acs_on_downstream) -+ return 1; -+ break; -+ case PCI_EXP_TYPE_ENDPOINT: -+ case PCI_EXP_TYPE_UPSTREAM: -+ case PCI_EXP_TYPE_LEG_END: -+ case PCI_EXP_TYPE_RC_END: -+ if (acs_on_multifunction && dev->multifunction) -+ return 1; -+ } ++KDIR := /lib/modules/$(KVERSION)/build ++PWD := $(shell pwd) + -+ return -ENOTTY; -+} - /* - * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be - * prevented for those affected devices. -@@ -5170,6 +5270,7 @@ static const struct pci_dev_acs_enabled { - { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, - /* Wangxun nics */ - { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, -+ { PCI_ANY_ID, PCI_ANY_ID, pcie_acs_overrides }, - { 0 } - }; - -diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig -index 0258dd879d64..b160173a530e 100644 ---- a/drivers/platform/x86/Kconfig -+++ b/drivers/platform/x86/Kconfig -@@ -267,6 +267,18 @@ config ASUS_WIRELESS - If you choose to compile this driver as a module the module will be - called asus-wireless. - -+config ASUS_ARMOURY -+ tristate "ASUS Armoury driver" -+ depends on ASUS_WMI -+ select FW_ATTR_CLASS -+ help -+ Say Y here if you have a WMI aware Asus machine and would like to use the -+ firmware_attributes API to control various settings typically exposed in -+ the ASUS Armoury Crate application available on Windows. ++.PHONY: all + -+ To compile this driver as a module, choose M here: the module will -+ be called asus-armoury. ++all: ++ $(MAKE) -C $(KDIR) M=$(PWD) modules + - config ASUS_WMI - tristate "ASUS WMI Driver" - depends on ACPI_WMI -@@ -289,6 +301,15 @@ config ASUS_WMI - To compile this driver as a module, choose M here: the module will - be called asus-wmi. - -+config ASUS_WMI_DEPRECATED_ATTRS -+ bool "BIOS option support in WMI platform (DEPRECATED)" -+ depends on ASUS_WMI -+ default y -+ help -+ Say Y to expose the configurable BIOS options through the asus-wmi -+ driver. This can be used with or without the asus-armoury driver which -+ has the same attributes, but more, and better features. 
++clean: ++ $(MAKE) -C $(KDIR) M=$(PWD) clean + - config ASUS_NB_WMI - tristate "Asus Notebook WMI Driver" - depends on ASUS_WMI -diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile -index e1b142947067..fe3e7e7dede8 100644 ---- a/drivers/platform/x86/Makefile -+++ b/drivers/platform/x86/Makefile -@@ -32,6 +32,7 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o - # ASUS - obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o - obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o -+obj-$(CONFIG_ASUS_ARMOURY) += asus-armoury.o - obj-$(CONFIG_ASUS_WMI) += asus-wmi.o - obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o - obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o -diff --git a/drivers/platform/x86/asus-armoury.c b/drivers/platform/x86/asus-armoury.c ++install: ++ $(MAKE) -C $(KDIR) M=$(PWD) modules_install +diff --git a/drivers/staging/apple-bce/apple_bce.c b/drivers/staging/apple-bce/apple_bce.c new file mode 100644 -index 000000000000..4dac91d02278 +index 000000000000..4fd2415d7028 --- /dev/null -+++ b/drivers/platform/x86/asus-armoury.c -@@ -0,0 +1,1074 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * Asus Armoury (WMI) attributes driver. This driver uses the fw_attributes -+ * class to expose the various WMI functions that many gaming and some -+ * non-gaming ASUS laptops have available. -+ * These typically don't fit anywhere else in the sysfs such as under LED class, -+ * hwmon or other, and are set in Windows using the ASUS Armoury Crate tool. -+ * -+ * Copyright(C) 2024 Luke Jones -+ */ -+ -+#include "linux/cleanup.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include ++++ b/drivers/staging/apple-bce/apple_bce.c +@@ -0,0 +1,445 @@ ++#include "apple_bce.h" +#include -+#include -+#include -+#include -+#include ++#include ++#include "audio/audio.h" ++#include + -+#include "asus-armoury.h" -+#include "firmware_attributes_class.h" ++static dev_t bce_chrdev; ++static struct class *bce_class; + -+#define DEBUG ++struct apple_bce_device *global_bce; + -+#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C" ++static int bce_create_command_queues(struct apple_bce_device *bce); ++static void bce_free_command_queues(struct apple_bce_device *bce); ++static irqreturn_t bce_handle_mb_irq(int irq, void *dev); ++static irqreturn_t bce_handle_dma_irq(int irq, void *dev); ++static int bce_fw_version_handshake(struct apple_bce_device *bce); ++static int bce_register_command_queue(struct apple_bce_device *bce, struct bce_queue_memcfg *cfg, int is_sq); + -+#define ASUS_MINI_LED_MODE_MASK 0x03 -+/* Standard modes for devices with only on/off */ -+#define ASUS_MINI_LED_OFF 0x00 -+#define ASUS_MINI_LED_ON 0x01 -+/* Like "on" but the effect is more vibrant or brighter */ -+#define ASUS_MINI_LED_STRONG_MODE 0x02 -+/* New modes for devices with 3 mini-led mode types */ -+#define ASUS_MINI_LED_2024_WEAK 0x00 -+#define ASUS_MINI_LED_2024_STRONG 0x01 -+#define ASUS_MINI_LED_2024_OFF 0x02 ++static int apple_bce_probe(struct pci_dev *dev, const struct pci_device_id *id) ++{ ++ struct apple_bce_device *bce = NULL; ++ int status = 0; ++ int nvec; + -+#define ASUS_POWER_CORE_MASK GENMASK(15, 8) -+#define ASUS_PERF_CORE_MASK GENMASK(7, 0) ++ pr_info("apple-bce: capturing our device\n"); + -+enum cpu_core_type { -+ CPU_CORE_PERF = 0, -+ CPU_CORE_POWER, -+}; ++ if (pci_enable_device(dev)) ++ return -ENODEV; ++ if (pci_request_regions(dev, "apple-bce")) { ++ status = -ENODEV; ++ goto fail; ++ } ++ pci_set_master(dev); ++ nvec = pci_alloc_irq_vectors(dev, 1, 8, 
PCI_IRQ_MSI);
++ if (nvec < 5) {
++ status = -EINVAL;
++ goto fail;
++ }
+
++ bce = kzalloc(sizeof(struct apple_bce_device), GFP_KERNEL);
++ if (!bce) {
++ status = -ENOMEM;
++ goto fail;
++ }
+
++ bce->pci = dev;
++ pci_set_drvdata(dev, bce);
+
++ bce->devt = bce_chrdev;
++ bce->dev = device_create(bce_class, &dev->dev, bce->devt, NULL, "apple-bce");
++ if (IS_ERR_OR_NULL(bce->dev)) {
++ status = PTR_ERR(bce->dev);
++ goto fail;
++ }
+
++ bce->reg_mem_mb = pci_iomap(dev, 4, 0);
++ bce->reg_mem_dma = pci_iomap(dev, 2, 0);
+
++ if (IS_ERR_OR_NULL(bce->reg_mem_mb) || IS_ERR_OR_NULL(bce->reg_mem_dma)) {
++ dev_warn(&dev->dev, "apple-bce: Failed to pci_iomap required regions\n");
++ goto fail;
++ }
+
++ bce_mailbox_init(&bce->mbox, bce->reg_mem_mb);
++ bce_timestamp_init(&bce->timestamp, bce->reg_mem_mb);
+
++ spin_lock_init(&bce->queues_lock);
++ ida_init(&bce->queue_ida);
+
++ if ((status = pci_request_irq(dev, 0, bce_handle_mb_irq, NULL, dev, "bce_mbox")))
++ goto fail;
++ if ((status = pci_request_irq(dev, 4, NULL, bce_handle_dma_irq, dev, "bce_dma")))
++ goto fail_interrupt_0;
+
++ if ((status = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(37)))) {
++ dev_warn(&dev->dev, "dma: Setting mask failed\n");
++ goto fail_interrupt;
++ }
+
++ /* Get function 0's interface. This is needed because Apple only accepts DMA on our function if function 0
++ is a bus master, so we need to work around this. 
*/ ++ bce->pci0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); ++#ifndef WITHOUT_NVME_PATCH ++ if ((status = pci_enable_device_mem(bce->pci0))) { ++ dev_warn(&dev->dev, "apple-bce: failed to enable function 0\n"); ++ goto fail_dev0; ++ } ++#endif ++ pci_set_master(bce->pci0); + -+static const struct class *fw_attr_class; ++ bce_timestamp_start(&bce->timestamp, true); + -+struct asus_armoury_priv { -+ struct device *fw_attr_dev; -+ struct kset *fw_attr_kset; ++ if ((status = bce_fw_version_handshake(bce))) ++ goto fail_ts; ++ pr_info("apple-bce: handshake done\n"); + -+ struct rog_tunables *rog_tunables; -+ u32 mini_led_dev_id; -+ u32 gpu_mux_dev_id; ++ if ((status = bce_create_command_queues(bce))) { ++ pr_info("apple-bce: Creating command queues failed\n"); ++ goto fail_ts; ++ } + -+ struct mutex mutex; -+}; ++ global_bce = bce; + -+static struct asus_armoury_priv asus_armoury = { -+ .mutex = __MUTEX_INITIALIZER(asus_armoury.mutex) -+}; ++ bce_vhci_create(bce, &bce->vhci); + -+struct fw_attrs_group { -+ bool pending_reboot; -+}; ++ return 0; + -+static struct fw_attrs_group fw_attrs = { -+ .pending_reboot = false, -+}; ++fail_ts: ++ bce_timestamp_stop(&bce->timestamp); ++#ifndef WITHOUT_NVME_PATCH ++ pci_disable_device(bce->pci0); ++fail_dev0: ++#endif ++ pci_dev_put(bce->pci0); ++fail_interrupt: ++ pci_free_irq(dev, 4, dev); ++fail_interrupt_0: ++ pci_free_irq(dev, 0, dev); ++fail: ++ if (bce && bce->dev) { ++ device_destroy(bce_class, bce->devt); ++ ++ if (!IS_ERR_OR_NULL(bce->reg_mem_mb)) ++ pci_iounmap(dev, bce->reg_mem_mb); ++ if (!IS_ERR_OR_NULL(bce->reg_mem_dma)) ++ pci_iounmap(dev, bce->reg_mem_dma); ++ ++ kfree(bce); ++ } + -+struct asus_attr_group { -+ const struct attribute_group *attr_group; -+ u32 wmi_devid; -+}; ++ pci_free_irq_vectors(dev); ++ pci_release_regions(dev); ++ pci_disable_device(dev); + -+static bool asus_wmi_is_present(u32 dev_id) ++ if (!status) ++ status = -EINVAL; ++ return status; ++} ++ ++static int bce_create_command_queues(struct apple_bce_device *bce) +{ -+ u32 retval; -+ int status; ++ int status; ++ struct bce_queue_memcfg *cfg; + -+ status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, &retval); -+ pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval); ++ bce->cmd_cq = bce_alloc_cq(bce, 0, 0x20); ++ bce->cmd_cmdq = bce_alloc_cmdq(bce, 1, 0x20); ++ if (bce->cmd_cq == NULL || bce->cmd_cmdq == NULL) { ++ status = -ENOMEM; ++ goto err; ++ } ++ bce->queues[0] = (struct bce_queue *) bce->cmd_cq; ++ bce->queues[1] = (struct bce_queue *) bce->cmd_cmdq->sq; + -+ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT); ++ cfg = kzalloc(sizeof(struct bce_queue_memcfg), GFP_KERNEL); ++ if (!cfg) { ++ status = -ENOMEM; ++ goto err; ++ } ++ bce_get_cq_memcfg(bce->cmd_cq, cfg); ++ if ((status = bce_register_command_queue(bce, cfg, false))) ++ goto err; ++ bce_get_sq_memcfg(bce->cmd_cmdq->sq, bce->cmd_cq, cfg); ++ if ((status = bce_register_command_queue(bce, cfg, true))) ++ goto err; ++ kfree(cfg); ++ ++ return 0; ++ ++err: ++ if (bce->cmd_cq) ++ bce_free_cq(bce, bce->cmd_cq); ++ if (bce->cmd_cmdq) ++ bce_free_cmdq(bce, bce->cmd_cmdq); ++ return status; +} + -+static void asus_set_reboot_and_signal_event(void) ++static void bce_free_command_queues(struct apple_bce_device *bce) +{ -+ fw_attrs.pending_reboot = true; -+ kobject_uevent(&asus_armoury.fw_attr_dev->kobj, KOBJ_CHANGE); ++ bce_free_cq(bce, bce->cmd_cq); ++ bce_free_cmdq(bce, bce->cmd_cmdq); ++ bce->cmd_cq = NULL; ++ bce->queues[0] = NULL; ++} ++ 
++static irqreturn_t bce_handle_mb_irq(int irq, void *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ bce_mailbox_handle_interrupt(&bce->mbox); ++ return IRQ_HANDLED; +} + -+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) ++static irqreturn_t bce_handle_dma_irq(int irq, void *dev) +{ -+ return sysfs_emit(buf, "%d\n", fw_attrs.pending_reboot); ++ int i; ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ spin_lock(&bce->queues_lock); ++ for (i = 0; i < BCE_MAX_QUEUE_COUNT; i++) ++ if (bce->queues[i] && bce->queues[i]->type == BCE_QUEUE_CQ) ++ bce_handle_cq_completions(bce, (struct bce_queue_cq *) bce->queues[i]); ++ spin_unlock(&bce->queues_lock); ++ return IRQ_HANDLED; +} + -+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); ++static int bce_fw_version_handshake(struct apple_bce_device *bce) ++{ ++ u64 result; ++ int status; + -+static bool asus_bios_requires_reboot(struct kobj_attribute *attr) ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_SET_FW_PROTOCOL_VERSION, BC_PROTOCOL_VERSION), ++ &result))) ++ return status; ++ if (BCE_MB_TYPE(result) != BCE_MB_SET_FW_PROTOCOL_VERSION || ++ BCE_MB_VALUE(result) != BC_PROTOCOL_VERSION) { ++ pr_err("apple-bce: FW version handshake failed %x:%llx\n", BCE_MB_TYPE(result), BCE_MB_VALUE(result)); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static int bce_register_command_queue(struct apple_bce_device *bce, struct bce_queue_memcfg *cfg, int is_sq) +{ -+ return !strcmp(attr->attr.name, "gpu_mux_mode") || -+ !strcmp(attr->attr.name, "cores_performance") || -+ !strcmp(attr->attr.name, "cores_efficiency") || -+ !strcmp(attr->attr.name, "panel_hd_mode"); ++ int status; ++ int cmd_type; ++ u64 result; ++ // OS X uses an bidirectional direction, but that's not really needed ++ dma_addr_t a = dma_map_single(&bce->pci->dev, cfg, sizeof(struct bce_queue_memcfg), DMA_TO_DEVICE); ++ if (dma_mapping_error(&bce->pci->dev, a)) ++ return -ENOMEM; ++ cmd_type = is_sq ? BCE_MB_REGISTER_COMMAND_SQ : BCE_MB_REGISTER_COMMAND_CQ; ++ status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(cmd_type, a), &result); ++ dma_unmap_single(&bce->pci->dev, a, sizeof(struct bce_queue_memcfg), DMA_TO_DEVICE); ++ if (status) ++ return status; ++ if (BCE_MB_TYPE(result) != BCE_MB_REGISTER_COMMAND_QUEUE_REPLY) ++ return -EINVAL; ++ return 0; +} + -+static int armoury_wmi_set_devstate(struct kobj_attribute *attr, u32 value, u32 wmi_dev) ++static void apple_bce_remove(struct pci_dev *dev) +{ -+ u32 result; -+ int err; ++ struct apple_bce_device *bce = pci_get_drvdata(dev); ++ bce->is_being_removed = true; + -+ guard(mutex)(&asus_armoury.mutex); -+ err = asus_wmi_set_devstate(wmi_dev, value, &result); -+ if (err) { -+ pr_err("Failed to set %s: %d\n", attr->attr.name, err); -+ return err; -+ } -+ /* -+ * !1 is usually considered a fail by ASUS, but some WMI methods do use > 1 -+ * to return a status code or similar. 
-+ */
-+ if (result < 1) {
-+ pr_err("Failed to set %s: (result): 0x%x\n", attr->attr.name, result);
-+ return -EIO;
-+ }
+
-+ return 0;
+}
+
-+/**
-+ * attr_uint_store() - Send an unsigned int to a WMI method, checks if within min/max inclusive.
-+ * @kobj: Pointer to the driver object.
-+ * @attr: Pointer to the attribute calling this function.
-+ * @buf: The buffer to read from, this is parsed to an unsigned int.
-+ * @count: Required by sysfs attribute macros, pass in from the callee attr.
-+ * @min: Minimum accepted value. Below this returns -EINVAL.
-+ * @max: Maximum accepted value. Above this returns -EINVAL.
-+ * @store_value: Pointer to where the parsed value should be stored.
-+ * @wmi_dev: The WMI function ID to use.
-+ *
-+ * This function is intended to be generic so it can be called from any "_store"
-+ * attribute which works only with integers. The integer to be sent to the WMI method
-+ * is range checked and an error returned if out of range.
-+ *
-+ * If the value is valid and the WMI call succeeds, the sysfs attribute is notified,
-+ * and if asus_bios_requires_reboot() is true the reboot attribute is also notified.
-+ *
-+ * Returns: Either count, or an error. 
-+ */ -+static ssize_t attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, -+ size_t count, u32 min, u32 max, u32 *store_value, u32 wmi_dev) ++static int bce_restore_state_and_wake(struct apple_bce_device *bce) +{ -+ u32 value; -+ int err; ++ int status; ++ u64 resp; ++ if (!bce->saved_data_dma_ptr) { ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_RESTORE_NO_STATE, 0), &resp))) { ++ pr_err("apple-bce: resume with no state failed (mailbox send)\n"); ++ return status; ++ } ++ if (BCE_MB_TYPE(resp) != BCE_MB_RESTORE_NO_STATE) { ++ pr_err("apple-bce: resume with no state failed (invalid device response)\n"); ++ return -EINVAL; ++ } ++ return 0; ++ } + -+ err = kstrtouint(buf, 10, &value); -+ if (err) -+ return err; ++ if ((status = bce_mailbox_send(&bce->mbox, BCE_MB_MSG(BCE_MB_RESTORE_STATE_AND_WAKE, ++ (bce->saved_data_dma_addr & ~(4096LLU - 1)) | (bce->saved_data_dma_size / 4096)), &resp))) { ++ pr_err("apple-bce: resume with state failed (mailbox send)\n"); ++ goto finish_with_state; ++ } ++ if (BCE_MB_TYPE(resp) != BCE_MB_SAVE_RESTORE_STATE_COMPLETE) { ++ pr_err("apple-bce: resume with state failed (invalid device response)\n"); ++ status = -EINVAL; ++ goto finish_with_state; ++ } + -+ if (value < min || value > max) -+ return -EINVAL; ++finish_with_state: ++ dma_free_coherent(&bce->pci->dev, bce->saved_data_dma_size, bce->saved_data_dma_ptr, bce->saved_data_dma_addr); ++ bce->saved_data_dma_ptr = NULL; ++ return status; ++} + -+ err = armoury_wmi_set_devstate(attr, value, wmi_dev); -+ if (err) -+ return err; ++static int apple_bce_suspend(struct device *dev) ++{ ++ struct apple_bce_device *bce = pci_get_drvdata(to_pci_dev(dev)); ++ int status; + -+ if (store_value != NULL) -+ *store_value = value; -+ sysfs_notify(kobj, NULL, attr->attr.name); ++ bce_timestamp_stop(&bce->timestamp); + -+ if (asus_bios_requires_reboot(attr)) -+ asus_set_reboot_and_signal_event(); ++ if ((status = bce_save_state_and_sleep(bce))) ++ return status; + -+ return count; ++ return 0; +} + -+/* Mini-LED mode **************************************************************/ -+static ssize_t mini_led_mode_current_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++static int apple_bce_resume(struct device *dev) +{ -+ u32 value; -+ int err; ++ struct apple_bce_device *bce = pci_get_drvdata(to_pci_dev(dev)); ++ int status; + -+ err = asus_wmi_get_devstate_dsts(asus_armoury.mini_led_dev_id, &value); -+ if (err) -+ return err; ++ pci_set_master(bce->pci); ++ pci_set_master(bce->pci0); + -+ value &= ASUS_MINI_LED_MODE_MASK; ++ if ((status = bce_restore_state_and_wake(bce))) ++ return status; + -+ /* -+ * Remap the mode values to match previous generation mini-LED. The last gen -+ * WMI 0 == off, while on this version WMI 2 == off (flipped). 
-+ */ -+ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) { -+ switch (value) { -+ case ASUS_MINI_LED_2024_WEAK: -+ value = ASUS_MINI_LED_ON; -+ break; -+ case ASUS_MINI_LED_2024_STRONG: -+ value = ASUS_MINI_LED_STRONG_MODE; -+ break; -+ case ASUS_MINI_LED_2024_OFF: -+ value = ASUS_MINI_LED_OFF; -+ break; -+ } -+ } ++ bce_timestamp_start(&bce->timestamp, false); + -+ return sysfs_emit(buf, "%u\n", value); ++ return 0; +} + -+static ssize_t mini_led_mode_current_value_store(struct kobject *kobj, -+ struct kobj_attribute *attr, -+ const char *buf, size_t count) -+{ -+ u32 mode; -+ int err; ++static struct pci_device_id apple_bce_ids[ ] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x1801) }, ++ { 0, }, ++}; + -+ err = kstrtou32(buf, 10, &mode); -+ if (err) -+ return err; ++MODULE_DEVICE_TABLE(pci, apple_bce_ids); + -+ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE && -+ mode > ASUS_MINI_LED_ON) -+ return -EINVAL; -+ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2 && -+ mode > ASUS_MINI_LED_STRONG_MODE) -+ return -EINVAL; ++struct dev_pm_ops apple_bce_pci_driver_pm = { ++ .suspend = apple_bce_suspend, ++ .resume = apple_bce_resume ++}; ++struct pci_driver apple_bce_pci_driver = { ++ .name = "apple-bce", ++ .id_table = apple_bce_ids, ++ .probe = apple_bce_probe, ++ .remove = apple_bce_remove, ++ .driver = { ++ .pm = &apple_bce_pci_driver_pm ++ } ++}; + -+ /* -+ * Remap the mode values so expected behaviour is the same as the last -+ * generation of mini-LED with 0 == off, 1 == on. -+ */ -+ if (asus_armoury.mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) { -+ switch (mode) { -+ case ASUS_MINI_LED_OFF: -+ mode = ASUS_MINI_LED_2024_OFF; -+ break; -+ case ASUS_MINI_LED_ON: -+ mode = ASUS_MINI_LED_2024_WEAK; -+ break; -+ case ASUS_MINI_LED_STRONG_MODE: -+ mode = ASUS_MINI_LED_2024_STRONG; -+ break; -+ } -+ } + -+ err = armoury_wmi_set_devstate(attr, mode, asus_armoury.mini_led_dev_id); -+ if (err) -+ return err; ++static int __init apple_bce_module_init(void) ++{ ++ int result; ++ if ((result = alloc_chrdev_region(&bce_chrdev, 0, 1, "apple-bce"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ bce_class = class_create(THIS_MODULE, "apple-bce"); ++#else ++ bce_class = class_create("apple-bce"); ++#endif ++ if (IS_ERR(bce_class)) { ++ result = PTR_ERR(bce_class); ++ goto fail_class; ++ } ++ if ((result = bce_vhci_module_init())) { ++ pr_err("apple-bce: bce-vhci init failed"); ++ goto fail_class; ++ } + -+ sysfs_notify(kobj, NULL, attr->attr.name); ++ result = pci_register_driver(&apple_bce_pci_driver); ++ if (result) ++ goto fail_drv; + -+ return count; -+} ++ aaudio_module_init(); + -+static ssize_t mini_led_mode_possible_values_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ switch (asus_armoury.mini_led_dev_id) { -+ case ASUS_WMI_DEVID_MINI_LED_MODE: -+ return sysfs_emit(buf, "0;1\n"); -+ case ASUS_WMI_DEVID_MINI_LED_MODE2: -+ return sysfs_emit(buf, "0;1;2\n"); -+ } ++ return 0; + -+ return sysfs_emit(buf, "0\n"); ++fail_drv: ++ pci_unregister_driver(&apple_bce_pci_driver); ++fail_class: ++ class_destroy(bce_class); ++fail_chrdev: ++ unregister_chrdev_region(bce_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; +} -+ -+ATTR_GROUP_ENUM_CUSTOM(mini_led_mode, "mini_led_mode", "Set the mini-LED backlight mode"); -+ -+static ssize_t gpu_mux_mode_current_value_store(struct kobject *kobj, -+ struct kobj_attribute *attr, const char *buf, -+ size_t count) ++static void __exit 
apple_bce_module_exit(void)
+{
++ pci_unregister_driver(&apple_bce_pci_driver);
+
++ aaudio_module_exit();
++ bce_vhci_module_exit();
++ class_destroy(bce_class);
++ unregister_chrdev_region(bce_chrdev, 1);
++}
+
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("MrARM");
++MODULE_DESCRIPTION("Apple BCE Driver");
++MODULE_VERSION("0.01");
++module_init(apple_bce_module_init);
++module_exit(apple_bce_module_exit);
+diff --git a/drivers/staging/apple-bce/apple_bce.h b/drivers/staging/apple-bce/apple_bce.h
+new file mode 100644
+index 000000000000..f13ab8d5742e
+--- /dev/null
++++ b/drivers/staging/apple-bce/apple_bce.h
+@@ -0,0 +1,38 @@
++#pragma once
+
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "mailbox.h"
++#include "queue.h"
++#include "vhci/vhci.h"
+
++#define BC_PROTOCOL_VERSION 0x20001
++#define BCE_MAX_QUEUE_COUNT 0x100
+
++#define BCE_QUEUE_USER_MIN 2
++#define BCE_QUEUE_USER_MAX (BCE_MAX_QUEUE_COUNT - 1)
+
++struct apple_bce_device {
++ struct pci_dev *pci, *pci0;
++ dev_t devt;
++ struct device *dev;
++ void __iomem *reg_mem_mb;
++ void __iomem *reg_mem_dma;
++ struct bce_mailbox mbox;
++ struct bce_timestamp timestamp;
++ struct bce_queue *queues[BCE_MAX_QUEUE_COUNT];
++ struct spinlock queues_lock;
++ struct ida queue_ida;
++ struct bce_queue_cq *cmd_cq;
++ struct bce_queue_cmdq *cmd_cmdq;
++ struct bce_queue_sq *int_sq_list[BCE_MAX_QUEUE_COUNT];
++ bool is_being_removed;
++
++ dma_addr_t saved_data_dma_addr;
++ void *saved_data_dma_ptr;
++ size_t saved_data_dma_size;
++
++ struct bce_vhci vhci;
++};
+
++extern struct apple_bce_device *global_bce;
+\ No newline at end of file
+diff --git a/drivers/staging/apple-bce/audio/audio.c b/drivers/staging/apple-bce/audio/audio.c
+new file mode 100644
+index 000000000000..bd16ddd16c1d
+--- /dev/null
++++ b/drivers/staging/apple-bce/audio/audio.c
+@@ -0,0 +1,711 @@
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/random.h>
++#include <linux/device.h>
++#include <sound/core.h>
++#include <sound/initval.h>
++#include <sound/pcm.h>
++#include <sound/jack.h>
++#include "audio.h"
++#include "pcm.h"
++#include <linux/version.h>
+
++static int aaudio_alsa_index = SNDRV_DEFAULT_IDX1;
++static char *aaudio_alsa_id = SNDRV_DEFAULT_STR1;
+
++static dev_t aaudio_chrdev;
++static struct class *aaudio_class;
+
++static int aaudio_init_cmd(struct aaudio_device *a);
++static int aaudio_init_bs(struct aaudio_device *a);
++static void aaudio_init_dev(struct aaudio_device *a, aaudio_device_id_t dev_id);
++static void aaudio_free_dev(struct aaudio_subdevice *sdev);
+
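The #if LINUX_VERSION_CODE guards around class_create() in this driver's module init paths exist because class_create() dropped its struct module * argument in kernel 6.4. A minimal sketch of how such call sites can be kept tidy; the compat_class_create() helper name is hypothetical and is not part of this patch:

#include <linux/module.h>
#include <linux/version.h>
#include <linux/device.h>

/* Hypothetical compat helper: picks the class_create() signature that
 * matches the kernel being built against (before/after 6.4). */
static inline struct class *compat_class_create(const char *name)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 4, 0)
	return class_create(THIS_MODULE, name);
#else
	return class_create(name);
#endif
}

With a helper like this, both init paths could call compat_class_create("apple-bce") or compat_class_create("aaudio") unconditionally and keep the existing IS_ERR() check on the result.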
-+/*
-+ * A user may be required to store the value twice, typical store first, then
-+ * rescan PCI bus to activate power, then store a second time to save correctly.
-+ */
-+static ssize_t dgpu_disable_current_value_store(struct kobject *kobj,
-+ struct kobj_attribute *attr, const char *buf,
-+ size_t count)
++static int aaudio_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
-+ int result, err;
-+ u32 disable;
++ struct aaudio_device *aaudio = NULL;
++ struct aaudio_subdevice *sdev = NULL;
++ int status = 0;
++ u32 cfg;
+
-+ err = kstrtou32(buf, 10, &disable);
-+ if (err)
-+ return err;
++ pr_info("aaudio: capturing our device\n");
+
-+ if (disable > 1)
-+ return -EINVAL;
++ if (pci_enable_device(dev))
++ return -ENODEV;
++ if (pci_request_regions(dev, "aaudio")) {
++ status = -ENODEV;
++ goto fail;
++ }
++ pci_set_master(dev);
+
-+ if (asus_armoury.gpu_mux_dev_id) {
-+ err = asus_wmi_get_devstate_dsts(asus_armoury.gpu_mux_dev_id, &result);
-+ if (err)
-+ return err;
-+ if (!result && disable) {
-+ err = -ENODEV;
-+ pr_warn("Can not disable dGPU when the MUX is in dGPU mode: %d\n", err);
-+ return err;
-+ }
-+ // TODO: handle a > 1 result, should do a PCI rescan and run again
-+ }
++ aaudio = kzalloc(sizeof(struct aaudio_device), GFP_KERNEL);
++ if (!aaudio) {
++ status = -ENOMEM;
++ goto fail;
++ }
+
-+ err = armoury_wmi_set_devstate(attr, disable, ASUS_WMI_DEVID_DGPU);
-+ if (err)
-+ return err;
++ aaudio->bce = global_bce;
++ if (!aaudio->bce) {
++ dev_warn(&dev->dev, "aaudio: No BCE available\n");
++ status = -EINVAL;
++ goto fail;
++ }
+
-+ sysfs_notify(kobj, NULL, attr->attr.name);
++ aaudio->pci = dev;
++ pci_set_drvdata(dev, aaudio);
+
-+ return count;
+}
-+WMI_SHOW_INT(dgpu_disable_current_value, "%d\n", ASUS_WMI_DEVID_DGPU);
-+ATTR_GROUP_BOOL_CUSTOM(dgpu_disable, "dgpu_disable", "Disable the dGPU");
++ aaudio->devt = aaudio_chrdev;
++ aaudio->dev = device_create(aaudio_class, &dev->dev, aaudio->devt, NULL, "aaudio");
++ if (IS_ERR_OR_NULL(aaudio->dev)) {
++ status = PTR_ERR(aaudio->dev);
++ goto fail;
++ }
++ device_link_add(aaudio->dev, aaudio->bce->dev, DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER);
+
-+/* The ACPI call to enable the eGPU also disables the internal dGPU */
-+static ssize_t egpu_enable_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
-+ const char *buf, size_t count)
+{
-+ int result, err;
-+ u32 enable;
++ init_completion(&aaudio->remote_alive);
++ INIT_LIST_HEAD(&aaudio->subdevice_list);
+
-+ err = kstrtou32(buf, 10, &enable);
-+ if (err)
-+ return err;
++ /* Init: set an unknown flag in the bitset */
++ if (pci_read_config_dword(dev, 4, &cfg))
++ dev_warn(&dev->dev, "aaudio: pci_read_config_dword fail\n");
++ if (pci_write_config_dword(dev, 4, cfg | 6u))
++ dev_warn(&dev->dev, "aaudio: pci_write_config_dword fail\n");
+
-+ if (enable > 1)
-+ return -EINVAL;
++ dev_info(aaudio->dev, "aaudio: bs len = %llx\n", pci_resource_len(dev, 0));
++ aaudio->reg_mem_bs_dma = pci_resource_start(dev, 0);
++ aaudio->reg_mem_bs = pci_iomap(dev, 0, 0);
++ aaudio->reg_mem_cfg = pci_iomap(dev, 4, 0);
+
-+ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_EGPU_CONNECTED, &result);
-+ if (err) {
-+ pr_warn("Failed to get eGPU connection status: %d\n", err);
-+ return err;
-+ }
++ aaudio->reg_mem_gpr = (u32 __iomem *) ((u8 __iomem *) aaudio->reg_mem_cfg + 0xC000);
+
-+ if (asus_armoury.gpu_mux_dev_id) {
-+ err = asus_wmi_get_devstate_dsts(asus_armoury.gpu_mux_dev_id, &result);
-+ if (err) {
-+ pr_warn("Failed to get GPU MUX status: %d\n",
result); -+ return result; -+ } -+ if (!result && enable) { -+ err = -ENODEV; -+ pr_warn("Can not enable eGPU when the MUX is in dGPU mode: %d\n", err); -+ return err; -+ } -+ } ++ if (IS_ERR_OR_NULL(aaudio->reg_mem_bs) || IS_ERR_OR_NULL(aaudio->reg_mem_cfg)) { ++ dev_warn(&dev->dev, "aaudio: Failed to pci_iomap required regions\n"); ++ goto fail; ++ } + -+ err = armoury_wmi_set_devstate(attr, enable, ASUS_WMI_DEVID_EGPU); -+ if (err) -+ return err; ++ if (aaudio_bce_init(aaudio)) { ++ dev_warn(&dev->dev, "aaudio: Failed to init BCE command transport\n"); ++ goto fail; ++ } + -+ sysfs_notify(kobj, NULL, attr->attr.name); ++ if (snd_card_new(aaudio->dev, aaudio_alsa_index, aaudio_alsa_id, THIS_MODULE, 0, &aaudio->card)) { ++ dev_err(&dev->dev, "aaudio: Failed to create ALSA card\n"); ++ goto fail; ++ } + -+ return count; -+} -+WMI_SHOW_INT(egpu_enable_current_value, "%d\n", ASUS_WMI_DEVID_EGPU); -+ATTR_GROUP_BOOL_CUSTOM(egpu_enable, "egpu_enable", "Enable the eGPU (also disables dGPU)"); ++ strcpy(aaudio->card->shortname, "Apple T2 Audio"); ++ strcpy(aaudio->card->longname, "Apple T2 Audio"); ++ strcpy(aaudio->card->mixername, "Apple T2 Audio"); ++ /* Dynamic alsa ids start at 100 */ ++ aaudio->next_alsa_id = 100; + -+/* Device memory available to APU */ ++ if (aaudio_init_cmd(aaudio)) { ++ dev_err(&dev->dev, "aaudio: Failed to initialize over BCE\n"); ++ goto fail_snd; ++ } + -+static ssize_t apu_mem_current_value_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) -+{ -+ int err; -+ u32 mem; ++ if (aaudio_init_bs(aaudio)) { ++ dev_err(&dev->dev, "aaudio: Failed to initialize BufferStruct\n"); ++ goto fail_snd; ++ } + -+ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_APU_MEM, &mem); -+ if (err) -+ return err; ++ if ((status = aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_ON))) { ++ dev_err(&dev->dev, "Failed to set remote access\n"); ++ return status; ++ } + -+ switch (mem) { -+ case 0x100: -+ mem = 0; -+ break; -+ case 0x102: -+ mem = 1; -+ break; -+ case 0x103: -+ mem = 2; -+ break; -+ case 0x104: -+ mem = 3; -+ break; -+ case 0x105: -+ mem = 4; -+ break; -+ case 0x106: -+ /* This is out of order and looks wrong but is correct */ -+ mem = 8; -+ break; -+ case 0x107: -+ mem = 5; -+ break; -+ case 0x108: -+ mem = 6; -+ break; -+ case 0x109: -+ mem = 7; -+ break; -+ default: -+ mem = 4; -+ break; -+ } ++ if (snd_card_register(aaudio->card)) { ++ dev_err(&dev->dev, "aaudio: Failed to register ALSA sound device\n"); ++ goto fail_snd; ++ } + -+ return sysfs_emit(buf, "%u\n", mem); -+} ++ list_for_each_entry(sdev, &aaudio->subdevice_list, list) { ++ struct aaudio_buffer_struct_device *dev = &aaudio->bs->devices[sdev->buf_id]; + -+static ssize_t apu_mem_current_value_store(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, size_t count) -+{ -+ int result, err; -+ u32 requested, mem; ++ if (sdev->out_stream_cnt == 1 && !strcmp(dev->name, "Speaker")) { ++ struct snd_pcm_hardware *hw = sdev->out_streams[0].alsa_hw_desc; + -+ result = kstrtou32(buf, 10, &requested); -+ if (result) -+ return result; ++ snprintf(aaudio->card->driver, sizeof(aaudio->card->driver) / sizeof(char), "AppleT2x%d", hw->channels_min); ++ } ++ } + -+ switch (requested) { -+ case 0: -+ mem = 0x000; -+ break; -+ case 1: -+ mem = 0x102; -+ break; -+ case 2: -+ mem = 0x103; -+ break; -+ case 3: -+ mem = 0x104; -+ break; -+ case 4: -+ mem = 0x105; -+ break; -+ case 5: -+ mem = 0x107; -+ break; -+ case 6: -+ mem = 0x108; -+ break; -+ case 7: -+ mem = 0x109; -+ break; -+ case 8: -+ 
/* This is out of order and looks wrong but is correct */ -+ mem = 0x106; -+ break; -+ default: -+ return -EIO; -+ } ++ return 0; + -+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_APU_MEM, mem, &result); -+ if (err) { -+ pr_warn("Failed to set apu_mem: %d\n", err); -+ return err; -+ } ++fail_snd: ++ snd_card_free(aaudio->card); ++fail: ++ if (aaudio && aaudio->dev) ++ device_destroy(aaudio_class, aaudio->devt); ++ kfree(aaudio); + -+ pr_info("APU memory changed to %uGB, reboot required\n", requested); -+ sysfs_notify(kobj, NULL, attr->attr.name); ++ if (!IS_ERR_OR_NULL(aaudio->reg_mem_bs)) ++ pci_iounmap(dev, aaudio->reg_mem_bs); ++ if (!IS_ERR_OR_NULL(aaudio->reg_mem_cfg)) ++ pci_iounmap(dev, aaudio->reg_mem_cfg); + -+ asus_set_reboot_and_signal_event(); ++ pci_release_regions(dev); ++ pci_disable_device(dev); + -+ return count; ++ if (!status) ++ status = -EINVAL; ++ return status; +} + -+static ssize_t apu_mem_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) ++ ++ ++static void aaudio_remove(struct pci_dev *dev) +{ -+ return sysfs_emit(buf, "0;1;2;3;4;5;6;7;8\n"); ++ struct aaudio_subdevice *sdev; ++ struct aaudio_device *aaudio = pci_get_drvdata(dev); ++ ++ snd_card_free(aaudio->card); ++ while (!list_empty(&aaudio->subdevice_list)) { ++ sdev = list_first_entry(&aaudio->subdevice_list, struct aaudio_subdevice, list); ++ list_del(&sdev->list); ++ aaudio_free_dev(sdev); ++ } ++ pci_iounmap(dev, aaudio->reg_mem_bs); ++ pci_iounmap(dev, aaudio->reg_mem_cfg); ++ device_destroy(aaudio_class, aaudio->devt); ++ pci_free_irq_vectors(dev); ++ pci_release_regions(dev); ++ pci_disable_device(dev); ++ kfree(aaudio); +} -+ATTR_GROUP_ENUM_CUSTOM(apu_mem, "apu_mem", "Set available system RAM (in GB) for the APU to use"); + -+static int init_max_cpu_cores(void) ++static int aaudio_suspend(struct device *dev) +{ -+ u32 cores; -+ int err; ++ struct aaudio_device *aaudio = pci_get_drvdata(to_pci_dev(dev)); + -+ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_CORES_MAX, &cores); -+ if (err) -+ return err; ++ if (aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_OFF)) ++ dev_warn(aaudio->dev, "Failed to reset remote access\n"); + -+ cores &= ~ASUS_WMI_DSTS_PRESENCE_BIT; -+ asus_armoury.rog_tunables->max_power_cores = FIELD_GET(ASUS_POWER_CORE_MASK, cores); -+ asus_armoury.rog_tunables->max_perf_cores = FIELD_GET(ASUS_PERF_CORE_MASK, cores); ++ pci_disable_device(aaudio->pci); ++ return 0; ++} + -+ err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_CORES, &cores); -+ if (err) { -+ pr_err("Could not get CPU core count: error %d", err); -+ return err; -+ } ++static int aaudio_resume(struct device *dev) ++{ ++ int status; ++ struct aaudio_device *aaudio = pci_get_drvdata(to_pci_dev(dev)); + -+ asus_armoury.rog_tunables->cur_perf_cores = FIELD_GET(ASUS_PERF_CORE_MASK, cores); -+ asus_armoury.rog_tunables->cur_power_cores = FIELD_GET(ASUS_POWER_CORE_MASK, cores); ++ if ((status = pci_enable_device(aaudio->pci))) ++ return status; ++ pci_set_master(aaudio->pci); + -+ asus_armoury.rog_tunables->min_perf_cores = CPU_PERF_CORE_COUNT_MIN; -+ asus_armoury.rog_tunables->min_power_cores = CPU_POWR_CORE_COUNT_MIN; ++ if ((status = aaudio_cmd_set_remote_access(aaudio, AAUDIO_REMOTE_ACCESS_ON))) { ++ dev_err(aaudio->dev, "Failed to set remote access\n"); ++ return status; ++ } + -+ return 0; ++ return 0; +} + -+static ssize_t cores_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf, -+ enum cpu_core_type core_type, enum cpu_core_value core_value) ++static int 
aaudio_init_cmd(struct aaudio_device *a) +{ -+ u32 cores; ++ int status; ++ struct aaudio_send_ctx sctx; ++ struct aaudio_msg buf; ++ u64 dev_cnt, dev_i; ++ aaudio_device_id_t *dev_l; ++ ++ if ((status = aaudio_send(a, &sctx, 500, ++ aaudio_msg_write_alive_notification, 1, 3))) { ++ dev_err(a->dev, "Sending alive notification failed\n"); ++ return status; ++ } + -+ switch (core_value) { -+ case CPU_CORE_DEFAULT: -+ case CPU_CORE_MAX: -+ if (core_type == CPU_CORE_PERF) -+ return sysfs_emit(buf, "%d\n", -+ asus_armoury.rog_tunables->max_perf_cores); -+ else -+ return sysfs_emit(buf, "%d\n", -+ asus_armoury.rog_tunables->max_power_cores); -+ case CPU_CORE_MIN: -+ if (core_type == CPU_CORE_PERF) -+ return sysfs_emit(buf, "%d\n", -+ asus_armoury.rog_tunables->min_perf_cores); -+ else -+ return sysfs_emit(buf, "%d\n", -+ asus_armoury.rog_tunables->min_power_cores); -+ default: -+ break; -+ } ++ if (wait_for_completion_timeout(&a->remote_alive, msecs_to_jiffies(500)) == 0) { ++ dev_err(a->dev, "Timed out waiting for remote\n"); ++ return -ETIMEDOUT; ++ } ++ dev_info(a->dev, "Continuing init\n"); + -+ if (core_type == CPU_CORE_PERF) -+ cores = asus_armoury.rog_tunables->cur_perf_cores; -+ else -+ cores = asus_armoury.rog_tunables->cur_power_cores; ++ buf = aaudio_reply_alloc(); ++ if ((status = aaudio_cmd_get_device_list(a, &buf, &dev_l, &dev_cnt))) { ++ dev_err(a->dev, "Failed to get device list\n"); ++ aaudio_reply_free(&buf); ++ return status; ++ } ++ for (dev_i = 0; dev_i < dev_cnt; ++dev_i) ++ aaudio_init_dev(a, dev_l[dev_i]); ++ aaudio_reply_free(&buf); + -+ return sysfs_emit(buf, "%d\n", cores); ++ return 0; +} + -+static ssize_t cores_current_value_store(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, enum cpu_core_type core_type) ++static void aaudio_init_stream_info(struct aaudio_subdevice *sdev, struct aaudio_stream *strm); ++static void aaudio_handle_jack_connection_change(struct aaudio_subdevice *sdev); ++ ++static void aaudio_init_dev(struct aaudio_device *a, aaudio_device_id_t dev_id) +{ -+ u32 new_cores, perf_cores, power_cores, out_val, min, max; -+ int result, err; ++ struct aaudio_subdevice *sdev; ++ struct aaudio_msg buf = aaudio_reply_alloc(); ++ u64 uid_len, stream_cnt, i; ++ aaudio_object_id_t *stream_list; ++ char *uid; + -+ result = kstrtou32(buf, 10, &new_cores); -+ if (result) -+ return result; ++ sdev = kzalloc(sizeof(struct aaudio_subdevice), GFP_KERNEL); + -+ if (core_type == CPU_CORE_PERF) { -+ perf_cores = new_cores; -+ power_cores = out_val = asus_armoury.rog_tunables->cur_power_cores; -+ min = asus_armoury.rog_tunables->min_perf_cores; -+ max = asus_armoury.rog_tunables->max_perf_cores; -+ } else { -+ perf_cores = asus_armoury.rog_tunables->cur_perf_cores; -+ power_cores = out_val = new_cores; -+ min = asus_armoury.rog_tunables->min_power_cores; -+ max = asus_armoury.rog_tunables->max_power_cores; -+ } ++ if (aaudio_cmd_get_property(a, &buf, dev_id, dev_id, AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_UID, 0), ++ NULL, 0, (void **) &uid, &uid_len) || uid_len > AAUDIO_DEVICE_MAX_UID_LEN) { ++ dev_err(a->dev, "Failed to get device uid for device %llx\n", dev_id); ++ goto fail; ++ } ++ dev_info(a->dev, "Remote device %llx %.*s\n", dev_id, (int) uid_len, uid); ++ ++ sdev->a = a; ++ INIT_LIST_HEAD(&sdev->list); ++ sdev->dev_id = dev_id; ++ sdev->buf_id = AAUDIO_BUFFER_ID_NONE; ++ strncpy(sdev->uid, uid, uid_len); ++ sdev->uid[uid_len + 1] = '\0'; ++ ++ if (aaudio_cmd_get_primitive_property(a, dev_id, dev_id, ++ 
AAUDIO_PROP(AAUDIO_PROP_SCOPE_INPUT, AAUDIO_PROP_LATENCY, 0), NULL, 0, &sdev->in_latency, sizeof(u32))) ++ dev_warn(a->dev, "Failed to query device input latency\n"); ++ if (aaudio_cmd_get_primitive_property(a, dev_id, dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_LATENCY, 0), NULL, 0, &sdev->out_latency, sizeof(u32))) ++ dev_warn(a->dev, "Failed to query device output latency\n"); ++ ++ if (aaudio_cmd_get_input_stream_list(a, &buf, dev_id, &stream_list, &stream_cnt)) { ++ dev_err(a->dev, "Failed to get input stream list for device %llx\n", dev_id); ++ goto fail; ++ } ++ if (stream_cnt > AAUDIO_DEIVCE_MAX_INPUT_STREAMS) { ++ dev_warn(a->dev, "Device %s input stream count %llu is larger than the supported count of %u\n", ++ sdev->uid, stream_cnt, AAUDIO_DEIVCE_MAX_INPUT_STREAMS); ++ stream_cnt = AAUDIO_DEIVCE_MAX_INPUT_STREAMS; ++ } ++ sdev->in_stream_cnt = stream_cnt; ++ for (i = 0; i < stream_cnt; i++) { ++ sdev->in_streams[i].id = stream_list[i]; ++ sdev->in_streams[i].buffer_cnt = 0; ++ aaudio_init_stream_info(sdev, &sdev->in_streams[i]); ++ sdev->in_streams[i].latency += sdev->in_latency; ++ } + -+ if (new_cores < min || new_cores > max) -+ return -EINVAL; ++ if (aaudio_cmd_get_output_stream_list(a, &buf, dev_id, &stream_list, &stream_cnt)) { ++ dev_err(a->dev, "Failed to get output stream list for device %llx\n", dev_id); ++ goto fail; ++ } ++ if (stream_cnt > AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS) { ++ dev_warn(a->dev, "Device %s input stream count %llu is larger than the supported count of %u\n", ++ sdev->uid, stream_cnt, AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS); ++ stream_cnt = AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS; ++ } ++ sdev->out_stream_cnt = stream_cnt; ++ for (i = 0; i < stream_cnt; i++) { ++ sdev->out_streams[i].id = stream_list[i]; ++ sdev->out_streams[i].buffer_cnt = 0; ++ aaudio_init_stream_info(sdev, &sdev->out_streams[i]); ++ sdev->out_streams[i].latency += sdev->in_latency; ++ } + -+ out_val = 0; -+ out_val |= FIELD_PREP(ASUS_PERF_CORE_MASK, perf_cores); -+ out_val |= FIELD_PREP(ASUS_POWER_CORE_MASK, power_cores); ++ if (sdev->is_pcm) ++ aaudio_create_pcm(sdev); ++ /* Headphone Jack status */ ++ if (!strcmp(sdev->uid, "Codec Output")) { ++ if (snd_jack_new(a->card, sdev->uid, SND_JACK_HEADPHONE, &sdev->jack, true, false)) ++ dev_warn(a->dev, "Failed to create an attached jack for %s\n", sdev->uid); ++ aaudio_cmd_property_listener(a, sdev->dev_id, sdev->dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_JACK_PLUGGED, 0)); ++ aaudio_handle_jack_connection_change(sdev); ++ } + -+ mutex_lock(&asus_armoury.mutex); -+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_CORES, out_val, &result); -+ mutex_unlock(&asus_armoury.mutex); ++ aaudio_reply_free(&buf); + -+ if (err) { -+ pr_warn("Failed to set CPU core count: %d\n", err); -+ return err; -+ } ++ list_add_tail(&sdev->list, &a->subdevice_list); ++ return; + -+ if (result > 1) { -+ pr_warn("Failed to set CPU core count (result): 0x%x\n", result); -+ return -EIO; -+ } ++fail: ++ aaudio_reply_free(&buf); ++ kfree(sdev); ++} + -+ pr_info("CPU core count changed, reboot required\n"); -+ sysfs_notify(kobj, NULL, attr->attr.name); -+ asus_set_reboot_and_signal_event(); ++static void aaudio_init_stream_info(struct aaudio_subdevice *sdev, struct aaudio_stream *strm) ++{ ++ if (aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, strm->id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_PHYS_FORMAT, 0), NULL, 0, ++ &strm->desc, sizeof(strm->desc))) ++ dev_warn(sdev->a->dev, "Failed to query stream descriptor\n"); ++ if 
(aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, strm->id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_GLOBAL, AAUDIO_PROP_LATENCY, 0), NULL, 0, &strm->latency, sizeof(u32))) ++ dev_warn(sdev->a->dev, "Failed to query stream latency\n"); ++ if (strm->desc.format_id == AAUDIO_FORMAT_LPCM) ++ sdev->is_pcm = true; ++} + -+ return 0; ++static void aaudio_free_dev(struct aaudio_subdevice *sdev) ++{ ++ size_t i; ++ for (i = 0; i < sdev->in_stream_cnt; i++) { ++ if (sdev->in_streams[i].alsa_hw_desc) ++ kfree(sdev->in_streams[i].alsa_hw_desc); ++ if (sdev->in_streams[i].buffers) ++ kfree(sdev->in_streams[i].buffers); ++ } ++ for (i = 0; i < sdev->out_stream_cnt; i++) { ++ if (sdev->out_streams[i].alsa_hw_desc) ++ kfree(sdev->out_streams[i].alsa_hw_desc); ++ if (sdev->out_streams[i].buffers) ++ kfree(sdev->out_streams[i].buffers); ++ } ++ kfree(sdev); +} + -+static ssize_t cores_performance_min_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++static struct aaudio_subdevice *aaudio_find_dev_by_dev_id(struct aaudio_device *a, aaudio_device_id_t dev_id) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_MIN); ++ struct aaudio_subdevice *sdev; ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (dev_id == sdev->dev_id) ++ return sdev; ++ } ++ return NULL; +} + -+static ssize_t cores_performance_max_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++static struct aaudio_subdevice *aaudio_find_dev_by_uid(struct aaudio_device *a, const char *uid) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_MAX); ++ struct aaudio_subdevice *sdev; ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (!strcmp(uid, sdev->uid)) ++ return sdev; ++ } ++ return NULL; +} + -+static ssize_t cores_performance_default_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++static void aaudio_init_bs_stream(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm); ++static void aaudio_init_bs_stream_host(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm); ++ ++static int aaudio_init_bs(struct aaudio_device *a) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_DEFAULT); ++ int i, j; ++ struct aaudio_buffer_struct_device *dev; ++ struct aaudio_subdevice *sdev; ++ u32 ver, sig, bs_base; ++ ++ ver = ioread32(&a->reg_mem_gpr[0]); ++ if (ver < 3) { ++ dev_err(a->dev, "aaudio: Bad GPR version (%u)", ver); ++ return -EINVAL; ++ } ++ sig = ioread32(&a->reg_mem_gpr[1]); ++ if (sig != AAUDIO_SIG) { ++ dev_err(a->dev, "aaudio: Bad GPR sig (%x)", sig); ++ return -EINVAL; ++ } ++ bs_base = ioread32(&a->reg_mem_gpr[2]); ++ a->bs = (struct aaudio_buffer_struct *) ((u8 *) a->reg_mem_bs + bs_base); ++ if (a->bs->signature != AAUDIO_SIG) { ++ dev_err(a->dev, "aaudio: Bad BufferStruct sig (%x)", a->bs->signature); ++ return -EINVAL; ++ } ++ dev_info(a->dev, "aaudio: BufferStruct ver = %i\n", a->bs->version); ++ dev_info(a->dev, "aaudio: Num devices = %i\n", a->bs->num_devices); ++ for (i = 0; i < a->bs->num_devices; i++) { ++ dev = &a->bs->devices[i]; ++ dev_info(a->dev, "aaudio: Device %i %s\n", i, dev->name); ++ ++ sdev = aaudio_find_dev_by_uid(a, dev->name); ++ if (!sdev) { ++ dev_err(a->dev, "aaudio: Subdevice not found for BufferStruct device %s\n", dev->name); ++ continue; ++ } ++ sdev->buf_id = (u8) i; ++ dev->num_input_streams = 0; ++ for (j = 0; j < dev->num_output_streams; j++) { ++ 
dev_info(a->dev, "aaudio: Device %i Stream %i: Output; Buffer Count = %i\n", i, j, ++ dev->output_streams[j].num_buffers); ++ if (j < sdev->out_stream_cnt) ++ aaudio_init_bs_stream(a, &sdev->out_streams[j], &dev->output_streams[j]); ++ } ++ } ++ ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (sdev->buf_id != AAUDIO_BUFFER_ID_NONE) ++ continue; ++ sdev->buf_id = i; ++ dev_info(a->dev, "aaudio: Created device %i %s\n", i, sdev->uid); ++ strcpy(a->bs->devices[i].name, sdev->uid); ++ a->bs->devices[i].num_input_streams = 0; ++ a->bs->devices[i].num_output_streams = 0; ++ a->bs->num_devices = ++i; ++ } ++ list_for_each_entry(sdev, &a->subdevice_list, list) { ++ if (sdev->in_stream_cnt == 1) { ++ dev_info(a->dev, "aaudio: Device %i Host Stream; Input\n", sdev->buf_id); ++ aaudio_init_bs_stream_host(a, &sdev->in_streams[0], &a->bs->devices[sdev->buf_id].input_streams[0]); ++ a->bs->devices[sdev->buf_id].num_input_streams = 1; ++ wmb(); ++ ++ if (aaudio_cmd_set_input_stream_address_ranges(a, sdev->dev_id)) { ++ dev_err(a->dev, "aaudio: Failed to set input stream address ranges\n"); ++ } ++ } ++ } ++ ++ return 0; +} + -+static ssize_t cores_performance_current_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++static void aaudio_init_bs_stream(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_PERF, CPU_CORE_CURRENT); ++ size_t i; ++ strm->buffer_cnt = bs_strm->num_buffers; ++ if (bs_strm->num_buffers > AAUDIO_DEIVCE_MAX_BUFFER_COUNT) { ++ dev_warn(a->dev, "BufferStruct buffer count %u exceeds driver limit of %u\n", bs_strm->num_buffers, ++ AAUDIO_DEIVCE_MAX_BUFFER_COUNT); ++ strm->buffer_cnt = AAUDIO_DEIVCE_MAX_BUFFER_COUNT; ++ } ++ if (!strm->buffer_cnt) ++ return; ++ strm->buffers = kmalloc_array(strm->buffer_cnt, sizeof(struct aaudio_dma_buf), GFP_KERNEL); ++ if (!strm->buffers) { ++ dev_err(a->dev, "Buffer list allocation failed\n"); ++ return; ++ } ++ for (i = 0; i < strm->buffer_cnt; i++) { ++ strm->buffers[i].dma_addr = a->reg_mem_bs_dma + (dma_addr_t) bs_strm->buffers[i].address; ++ strm->buffers[i].ptr = a->reg_mem_bs + bs_strm->buffers[i].address; ++ strm->buffers[i].size = bs_strm->buffers[i].size; ++ } ++ ++ if (strm->buffer_cnt == 1) { ++ strm->alsa_hw_desc = kmalloc(sizeof(struct snd_pcm_hardware), GFP_KERNEL); ++ if (aaudio_create_hw_info(&strm->desc, strm->alsa_hw_desc, strm->buffers[0].size)) { ++ kfree(strm->alsa_hw_desc); ++ strm->alsa_hw_desc = NULL; ++ } ++ } +} + -+static ssize_t cores_performance_current_value_store(struct kobject *kobj, -+ struct kobj_attribute *attr, -+ const char *buf, size_t count) ++static void aaudio_init_bs_stream_host(struct aaudio_device *a, struct aaudio_stream *strm, ++ struct aaudio_buffer_struct_stream *bs_strm) +{ -+ int err; ++ size_t size; ++ dma_addr_t dma_addr; ++ void *dma_ptr; ++ size = strm->desc.bytes_per_packet * 16640; ++ dma_ptr = dma_alloc_coherent(&a->pci->dev, size, &dma_addr, GFP_KERNEL); ++ if (!dma_ptr) { ++ dev_err(a->dev, "dma_alloc_coherent failed\n"); ++ return; ++ } ++ bs_strm->buffers[0].address = dma_addr; ++ bs_strm->buffers[0].size = size; ++ bs_strm->num_buffers = 1; + -+ err = cores_current_value_store(kobj, attr, buf, CPU_CORE_PERF); -+ if (err) -+ return err; ++ memset(dma_ptr, 0, size); + -+ return count; ++ strm->buffer_cnt = 1; ++ strm->buffers = kmalloc_array(strm->buffer_cnt, sizeof(struct aaudio_dma_buf), GFP_KERNEL); ++ if (!strm->buffers) { ++ dev_err(a->dev, 
"Buffer list allocation failed\n"); ++ return; ++ } ++ strm->buffers[0].dma_addr = dma_addr; ++ strm->buffers[0].ptr = dma_ptr; ++ strm->buffers[0].size = size; ++ ++ strm->alsa_hw_desc = kmalloc(sizeof(struct snd_pcm_hardware), GFP_KERNEL); ++ if (aaudio_create_hw_info(&strm->desc, strm->alsa_hw_desc, strm->buffers[0].size)) { ++ kfree(strm->alsa_hw_desc); ++ strm->alsa_hw_desc = NULL; ++ } +} -+ATTR_GROUP_CORES_RW(cores_performance, "cores_performance", -+ "Set the max available performance cores"); + -+static ssize_t cores_efficiency_min_value_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) ++static void aaudio_handle_prop_change(struct aaudio_device *a, struct aaudio_msg *msg); ++ ++void aaudio_handle_notification(struct aaudio_device *a, struct aaudio_msg *msg) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_MIN); ++ struct aaudio_send_ctx sctx; ++ struct aaudio_msg_base base; ++ if (aaudio_msg_read_base(msg, &base)) ++ return; ++ switch (base.msg) { ++ case AAUDIO_MSG_NOTIFICATION_BOOT: ++ dev_info(a->dev, "Received boot notification from remote\n"); ++ ++ /* Resend the alive notify */ ++ if (aaudio_send(a, &sctx, 500, ++ aaudio_msg_write_alive_notification, 1, 3)) { ++ pr_err("Sending alive notification failed\n"); ++ } ++ break; ++ case AAUDIO_MSG_NOTIFICATION_ALIVE: ++ dev_info(a->dev, "Received alive notification from remote\n"); ++ complete_all(&a->remote_alive); ++ break; ++ case AAUDIO_MSG_PROPERTY_CHANGED: ++ aaudio_handle_prop_change(a, msg); ++ break; ++ default: ++ dev_info(a->dev, "Unhandled notification %i", base.msg); ++ break; ++ } +} + -+static ssize_t cores_efficiency_max_value_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) ++struct aaudio_prop_change_work_struct { ++ struct work_struct ws; ++ struct aaudio_device *a; ++ aaudio_device_id_t dev; ++ aaudio_object_id_t obj; ++ struct aaudio_prop_addr prop; ++}; ++ ++static void aaudio_handle_jack_connection_change(struct aaudio_subdevice *sdev) ++{ ++ u32 plugged; ++ if (!sdev->jack) ++ return; ++ /* NOTE: Apple made the plug status scoped to the input and output streams. This makes no sense for us, so I just ++ * always pick the OUTPUT status. */ ++ if (aaudio_cmd_get_primitive_property(sdev->a, sdev->dev_id, sdev->dev_id, ++ AAUDIO_PROP(AAUDIO_PROP_SCOPE_OUTPUT, AAUDIO_PROP_JACK_PLUGGED, 0), NULL, 0, &plugged, sizeof(plugged))) { ++ dev_err(sdev->a->dev, "Failed to get jack enable status\n"); ++ return; ++ } ++ dev_dbg(sdev->a->dev, "Jack is now %s\n", plugged ? "plugged" : "unplugged"); ++ snd_jack_report(sdev->jack, plugged ? 
sdev->jack->type : 0); ++} ++ ++void aaudio_handle_prop_change_work(struct work_struct *ws) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_MAX); ++ struct aaudio_prop_change_work_struct *work = container_of(ws, struct aaudio_prop_change_work_struct, ws); ++ struct aaudio_subdevice *sdev; ++ ++ sdev = aaudio_find_dev_by_dev_id(work->a, work->dev); ++ if (!sdev) { ++ dev_err(work->a->dev, "Property notification change: device not found\n"); ++ goto done; ++ } ++ dev_dbg(work->a->dev, "Property changed for device: %s\n", sdev->uid); ++ ++ if (work->prop.scope == AAUDIO_PROP_SCOPE_OUTPUT && work->prop.selector == AAUDIO_PROP_JACK_PLUGGED) { ++ aaudio_handle_jack_connection_change(sdev); ++ } ++ ++done: ++ kfree(work); +} + -+static ssize_t cores_efficiency_default_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++void aaudio_handle_prop_change(struct aaudio_device *a, struct aaudio_msg *msg) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_DEFAULT); ++ /* NOTE: This is a scheduled work because this callback will generally need to query device information and this ++ * is not possible when we are in the reply parsing code's context. */ ++ struct aaudio_prop_change_work_struct *work; ++ work = kmalloc(sizeof(struct aaudio_prop_change_work_struct), GFP_KERNEL); ++ work->a = a; ++ INIT_WORK(&work->ws, aaudio_handle_prop_change_work); ++ aaudio_msg_read_property_changed(msg, &work->dev, &work->obj, &work->prop); ++ schedule_work(&work->ws); +} + -+static ssize_t cores_efficiency_current_value_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) ++#define aaudio_send_cmd_response(a, sctx, msg, fn, ...) \ ++ if (aaudio_send_with_tag(a, sctx, ((struct aaudio_msg_header *) msg->data)->tag, 500, fn, ##__VA_ARGS__)) \ ++ pr_err("aaudio: Failed to reply to a command\n"); ++ ++void aaudio_handle_cmd_timestamp(struct aaudio_device *a, struct aaudio_msg *msg) +{ -+ return cores_value_show(kobj, attr, buf, CPU_CORE_POWER, CPU_CORE_CURRENT); ++ ktime_t time_os = ktime_get_boottime(); ++ struct aaudio_send_ctx sctx; ++ struct aaudio_subdevice *sdev; ++ u64 devid, timestamp, update_seed; ++ aaudio_msg_read_update_timestamp(msg, &devid, ×tamp, &update_seed); ++ dev_dbg(a->dev, "Received timestamp update for dev=%llx ts=%llx seed=%llx\n", devid, timestamp, update_seed); ++ ++ sdev = aaudio_find_dev_by_dev_id(a, devid); ++ aaudio_handle_timestamp(sdev, time_os, timestamp); ++ ++ aaudio_send_cmd_response(a, &sctx, msg, ++ aaudio_msg_write_update_timestamp_response); +} + -+static ssize_t cores_efficiency_current_value_store(struct kobject *kobj, -+ struct kobj_attribute *attr, const char *buf, -+ size_t count) ++void aaudio_handle_command(struct aaudio_device *a, struct aaudio_msg *msg) +{ -+ int err; ++ struct aaudio_msg_base base; ++ if (aaudio_msg_read_base(msg, &base)) ++ return; ++ switch (base.msg) { ++ case AAUDIO_MSG_UPDATE_TIMESTAMP: ++ aaudio_handle_cmd_timestamp(a, msg); ++ break; ++ default: ++ dev_info(a->dev, "Unhandled device command %i", base.msg); ++ break; ++ } ++} + -+ err = cores_current_value_store(kobj, attr, buf, CPU_CORE_POWER); -+ if (err) -+ return err; ++static struct pci_device_id aaudio_ids[ ] = { ++ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x1803) }, ++ { 0, }, ++}; + -+ return count; ++struct dev_pm_ops aaudio_pci_driver_pm = { ++ .suspend = aaudio_suspend, ++ .resume = aaudio_resume ++}; ++struct pci_driver aaudio_pci_driver = { ++ .name = "aaudio", ++ .id_table = aaudio_ids, ++ .probe = 
aaudio_probe, ++ .remove = aaudio_remove, ++ .driver = { ++ .pm = &aaudio_pci_driver_pm ++ } ++}; ++ ++ ++int aaudio_module_init(void) ++{ ++ int result; ++ if ((result = alloc_chrdev_region(&aaudio_chrdev, 0, 1, "aaudio"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ aaudio_class = class_create(THIS_MODULE, "aaudio"); ++#else ++ aaudio_class = class_create("aaudio"); ++#endif ++ if (IS_ERR(aaudio_class)) { ++ result = PTR_ERR(aaudio_class); ++ goto fail_class; ++ } ++ ++ result = pci_register_driver(&aaudio_pci_driver); ++ if (result) ++ goto fail_drv; ++ return 0; ++ ++fail_drv: ++ pci_unregister_driver(&aaudio_pci_driver); ++fail_class: ++ class_destroy(aaudio_class); ++fail_chrdev: ++ unregister_chrdev_region(aaudio_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; +} -+ATTR_GROUP_CORES_RW(cores_efficiency, "cores_efficiency", -+ "Set the max available efficiency cores"); + -+/* Simple attribute creation */ -+ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, "ppt_pl1_spl", ASUS_WMI_DEVID_PPT_PL1_SPL, cpu_default, -+ cpu_min, cpu_max, 1, "Set the CPU slow package limit"); -+ATTR_GROUP_ROG_TUNABLE(ppt_pl2_sppt, "ppt_pl2_sppt", ASUS_WMI_DEVID_PPT_PL2_SPPT, cpu_default, -+ cpu_min, cpu_max, 1, "Set the CPU fast package limit"); -+ATTR_GROUP_ROG_TUNABLE(ppt_pl3_fppt, "ppt_pl3_fppt", ASUS_WMI_DEVID_PPT_FPPT, cpu_default, cpu_min, -+ cpu_max, 1, "Set the CPU slow package limit"); -+ATTR_GROUP_ROG_TUNABLE(ppt_apu_sppt, "ppt_apu_sppt", ASUS_WMI_DEVID_PPT_APU_SPPT, -+ platform_default, platform_min, platform_max, 1, -+ "Set the CPU slow package limit"); -+ATTR_GROUP_ROG_TUNABLE(ppt_platform_sppt, "ppt_platform_sppt", ASUS_WMI_DEVID_PPT_PLAT_SPPT, -+ platform_default, platform_min, platform_max, 1, -+ "Set the CPU slow package limit"); -+ATTR_GROUP_ROG_TUNABLE(nv_dynamic_boost, "nv_dynamic_boost", ASUS_WMI_DEVID_NV_DYN_BOOST, -+ nv_boost_default, nv_boost_min, nv_boost_max, 1, -+ "Set the Nvidia dynamic boost limit"); -+ATTR_GROUP_ROG_TUNABLE(nv_temp_target, "nv_temp_target", ASUS_WMI_DEVID_NV_THERM_TARGET, -+ nv_temp_default, nv_boost_min, nv_temp_max, 1, -+ "Set the Nvidia max thermal limit"); -+ATTR_GROUP_ROG_TUNABLE(dgpu_tgp, "dgpu_tgp", ASUS_WMI_DEVID_DGPU_SET_TGP, dgpu_tgp_default, -+ dgpu_tgp_min, dgpu_tgp_max, 1, -+ "Set the additional TGP on top of the base TGP"); ++void aaudio_module_exit(void) ++{ ++ pci_unregister_driver(&aaudio_pci_driver); ++ class_destroy(aaudio_class); ++ unregister_chrdev_region(aaudio_chrdev, 1); ++} + -+ATTR_GROUP_INT_VALUE_ONLY_RO(dgpu_base_tgp, "dgpu_base_tgp", ASUS_WMI_DEVID_DGPU_BASE_TGP, -+ "Read the base TGP value"); ++struct aaudio_alsa_pcm_id_mapping aaudio_alsa_id_mappings[] = { ++ {"Speaker", 0}, ++ {"Digital Mic", 1}, ++ {"Codec Output", 2}, ++ {"Codec Input", 3}, ++ {"Bridge Loopback", 4}, ++ {} ++}; + -+ATTR_GROUP_ENUM_INT_RO(charge_mode, "charge_mode", ASUS_WMI_DEVID_CHARGE_MODE, "0;1;2", -+ "Show the current mode of charging"); ++module_param_named(index, aaudio_alsa_index, int, 0444); ++MODULE_PARM_DESC(index, "Index value for Apple Internal Audio soundcard."); ++module_param_named(id, aaudio_alsa_id, charp, 0444); ++MODULE_PARM_DESC(id, "ID string for Apple Internal Audio soundcard."); +diff --git a/drivers/staging/apple-bce/audio/audio.h b/drivers/staging/apple-bce/audio/audio.h +new file mode 100644 +index 000000000000..004bc1e22ea4 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/audio.h +@@ -0,0 +1,125 @@ ++#ifndef AAUDIO_H ++#define AAUDIO_H + -+ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", 
ASUS_WMI_DEVID_BOOT_SOUND, -+ "Set the boot POST sound"); -+ATTR_GROUP_BOOL_RW(mcu_powersave, "mcu_powersave", ASUS_WMI_DEVID_MCU_POWERSAVE, -+ "Set MCU powersaving mode"); -+ATTR_GROUP_BOOL_RW(panel_od, "panel_overdrive", ASUS_WMI_DEVID_PANEL_OD, -+ "Set the panel refresh overdrive"); -+ATTR_GROUP_BOOL_RW(panel_hd_mode, "panel_hd_mode", ASUS_WMI_DEVID_PANEL_HD, -+ "Set the panel HD mode to UHD<0> or FHD<1>"); -+ATTR_GROUP_BOOL_RO(egpu_connected, "egpu_connected", ASUS_WMI_DEVID_EGPU_CONNECTED, -+ "Show the eGPU connection status"); ++#include ++#include ++#include "../apple_bce.h" ++#include "protocol_bce.h" ++#include "description.h" + -+/* If an attribute does not require any special case handling add it here */ -+static const struct asus_attr_group armoury_attr_groups[] = { -+ { &egpu_connected_attr_group, ASUS_WMI_DEVID_EGPU_CONNECTED }, -+ { &egpu_enable_attr_group, ASUS_WMI_DEVID_EGPU }, -+ { &dgpu_disable_attr_group, ASUS_WMI_DEVID_DGPU }, ++#define AAUDIO_SIG 0x19870423 + -+ { &ppt_pl1_spl_attr_group, ASUS_WMI_DEVID_PPT_PL1_SPL }, -+ { &ppt_pl2_sppt_attr_group, ASUS_WMI_DEVID_PPT_PL2_SPPT }, -+ { &ppt_pl3_fppt_attr_group, ASUS_WMI_DEVID_PPT_FPPT }, -+ { &ppt_apu_sppt_attr_group, ASUS_WMI_DEVID_PPT_APU_SPPT }, -+ { &ppt_platform_sppt_attr_group, ASUS_WMI_DEVID_PPT_PLAT_SPPT }, -+ { &nv_dynamic_boost_attr_group, ASUS_WMI_DEVID_NV_DYN_BOOST }, -+ { &nv_temp_target_attr_group, ASUS_WMI_DEVID_NV_THERM_TARGET }, -+ { &dgpu_base_tgp_attr_group, ASUS_WMI_DEVID_DGPU_BASE_TGP }, -+ { &dgpu_tgp_attr_group, ASUS_WMI_DEVID_DGPU_SET_TGP }, -+ { &apu_mem_attr_group, ASUS_WMI_DEVID_APU_MEM }, -+ { &cores_efficiency_attr_group, ASUS_WMI_DEVID_CORES_MAX }, -+ { &cores_performance_attr_group, ASUS_WMI_DEVID_CORES_MAX }, ++#define AAUDIO_DEVICE_MAX_UID_LEN 128 ++#define AAUDIO_DEIVCE_MAX_INPUT_STREAMS 1 ++#define AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS 1 ++#define AAUDIO_DEIVCE_MAX_BUFFER_COUNT 1 + -+ { &charge_mode_attr_group, ASUS_WMI_DEVID_CHARGE_MODE }, -+ { &boot_sound_attr_group, ASUS_WMI_DEVID_BOOT_SOUND }, -+ { &mcu_powersave_attr_group, ASUS_WMI_DEVID_MCU_POWERSAVE }, -+ { &panel_od_attr_group, ASUS_WMI_DEVID_PANEL_OD }, -+ { &panel_hd_mode_attr_group, ASUS_WMI_DEVID_PANEL_HD }, ++#define AAUDIO_BUFFER_ID_NONE 0xffu ++ ++struct snd_card; ++struct snd_pcm; ++struct snd_pcm_hardware; ++struct snd_jack; ++ ++struct __attribute__((packed)) __attribute__((aligned(4))) aaudio_buffer_struct_buffer { ++ size_t address; ++ size_t size; ++ size_t pad[4]; ++}; ++struct aaudio_buffer_struct_stream { ++ u8 num_buffers; ++ struct aaudio_buffer_struct_buffer buffers[100]; ++ char filler[32]; ++}; ++struct aaudio_buffer_struct_device { ++ char name[128]; ++ u8 num_input_streams; ++ u8 num_output_streams; ++ struct aaudio_buffer_struct_stream input_streams[5]; ++ struct aaudio_buffer_struct_stream output_streams[5]; ++ char filler[128]; ++}; ++struct aaudio_buffer_struct { ++ u32 version; ++ u32 signature; ++ u32 flags; ++ u8 num_devices; ++ struct aaudio_buffer_struct_device devices[20]; +}; + -+static int asus_fw_attr_add(void) -+{ -+ int err, i; ++struct aaudio_device; ++struct aaudio_dma_buf { ++ dma_addr_t dma_addr; ++ void *ptr; ++ size_t size; ++}; ++struct aaudio_stream { ++ aaudio_object_id_t id; ++ size_t buffer_cnt; ++ struct aaudio_dma_buf *buffers; + -+ err = fw_attributes_class_get(&fw_attr_class); -+ if (err) -+ return err; ++ struct aaudio_apple_description desc; ++ struct snd_pcm_hardware *alsa_hw_desc; ++ u32 latency; + -+ asus_armoury.fw_attr_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), 
-+ NULL, "%s", DRIVER_NAME); -+ if (IS_ERR(asus_armoury.fw_attr_dev)) { -+ err = PTR_ERR(asus_armoury.fw_attr_dev); -+ goto fail_class_get; -+ } ++ bool waiting_for_first_ts; + -+ asus_armoury.fw_attr_kset = kset_create_and_add("attributes", NULL, -+ &asus_armoury.fw_attr_dev->kobj); -+ if (!asus_armoury.fw_attr_kset) { -+ err = -ENOMEM; -+ goto err_destroy_classdev; -+ } ++ ktime_t remote_timestamp; ++ snd_pcm_sframes_t frame_min; ++ int started; ++}; ++struct aaudio_subdevice { ++ struct aaudio_device *a; ++ struct list_head list; ++ aaudio_device_id_t dev_id; ++ u32 in_latency, out_latency; ++ u8 buf_id; ++ int alsa_id; ++ char uid[AAUDIO_DEVICE_MAX_UID_LEN + 1]; ++ size_t in_stream_cnt; ++ struct aaudio_stream in_streams[AAUDIO_DEIVCE_MAX_INPUT_STREAMS]; ++ size_t out_stream_cnt; ++ struct aaudio_stream out_streams[AAUDIO_DEIVCE_MAX_OUTPUT_STREAMS]; ++ bool is_pcm; ++ struct snd_pcm *pcm; ++ struct snd_jack *jack; ++}; ++struct aaudio_alsa_pcm_id_mapping { ++ const char *name; ++ int alsa_id; ++}; + -+ err = sysfs_create_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); -+ if (err) { -+ pr_err("Failed to create sysfs level attributes\n"); -+ goto err_destroy_kset; -+ } ++struct aaudio_device { ++ struct pci_dev *pci; ++ dev_t devt; ++ struct device *dev; ++ void __iomem *reg_mem_bs; ++ dma_addr_t reg_mem_bs_dma; ++ void __iomem *reg_mem_cfg; + -+ asus_armoury.mini_led_dev_id = 0; -+ if (asus_wmi_is_present(ASUS_WMI_DEVID_MINI_LED_MODE)) { -+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE; -+ } else if (asus_wmi_is_present(ASUS_WMI_DEVID_MINI_LED_MODE2)) { -+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2; -+ } ++ u32 __iomem *reg_mem_gpr; + -+ if (asus_armoury.mini_led_dev_id) { -+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group); -+ if (err) { -+ pr_err("Failed to create sysfs-group for mini_led\n"); -+ goto err_remove_file; -+ } -+ } ++ struct aaudio_buffer_struct *bs; + -+ asus_armoury.gpu_mux_dev_id = 0; -+ if (asus_wmi_is_present(ASUS_WMI_DEVID_GPU_MUX)) { -+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX; -+ } else if (asus_wmi_is_present(ASUS_WMI_DEVID_GPU_MUX_VIVO)) { -+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX_VIVO; -+ } ++ struct apple_bce_device *bce; ++ struct aaudio_bce bcem; + -+ if (asus_armoury.gpu_mux_dev_id) { -+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group); -+ if (err) { -+ pr_err("Failed to create sysfs-group for gpu_mux\n"); -+ goto err_remove_mini_led_group; -+ } -+ } ++ struct snd_card *card; + -+ for (i = 0; i < ARRAY_SIZE(armoury_attr_groups); i++) { -+ if (!asus_wmi_is_present(armoury_attr_groups[i].wmi_devid)) -+ continue; ++ struct list_head subdevice_list; ++ int next_alsa_id; + -+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, -+ armoury_attr_groups[i].attr_group); -+ if (err) { -+ pr_err("Failed to create sysfs-group for %s\n", -+ armoury_attr_groups[i].attr_group->name); -+ goto err_remove_groups; -+ } -+ } ++ struct completion remote_alive; ++}; + -+ return 0; ++void aaudio_handle_notification(struct aaudio_device *a, struct aaudio_msg *msg); ++void aaudio_handle_prop_change_work(struct work_struct *ws); ++void aaudio_handle_cmd_timestamp(struct aaudio_device *a, struct aaudio_msg *msg); ++void aaudio_handle_command(struct aaudio_device *a, struct aaudio_msg *msg); + -+err_remove_groups: -+ while (--i >= 0) { -+ if (asus_wmi_is_present(armoury_attr_groups[i].wmi_devid)) -+ 
sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, armoury_attr_groups[i].attr_group); -+ } -+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group); -+err_remove_mini_led_group: -+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group); -+err_remove_file: -+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); -+err_destroy_kset: -+ kset_unregister(asus_armoury.fw_attr_kset); -+err_destroy_classdev: -+ device_destroy(fw_attr_class, MKDEV(0, 0)); -+fail_class_get: -+ fw_attributes_class_put(); -+ return err; -+} ++int aaudio_module_init(void); ++void aaudio_module_exit(void); + -+/* Init / exit ****************************************************************/ ++extern struct aaudio_alsa_pcm_id_mapping aaudio_alsa_id_mappings[]; + -+/* Set up the min/max and defaults for ROG tunables */ -+static void init_rog_tunables(struct rog_tunables *rog) ++#endif //AAUDIO_H +diff --git a/drivers/staging/apple-bce/audio/description.h b/drivers/staging/apple-bce/audio/description.h +new file mode 100644 +index 000000000000..dfef3ab68f27 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/description.h +@@ -0,0 +1,42 @@ ++#ifndef AAUDIO_DESCRIPTION_H ++#define AAUDIO_DESCRIPTION_H ++ ++#include ++ ++struct aaudio_apple_description { ++ u64 sample_rate_double; ++ u32 format_id; ++ u32 format_flags; ++ u32 bytes_per_packet; ++ u32 frames_per_packet; ++ u32 bytes_per_frame; ++ u32 channels_per_frame; ++ u32 bits_per_channel; ++ u32 reserved; ++}; ++ ++enum { ++ AAUDIO_FORMAT_LPCM = 0x6c70636d // 'lpcm' ++}; ++ ++enum { ++ AAUDIO_FORMAT_FLAG_FLOAT = 1, ++ AAUDIO_FORMAT_FLAG_BIG_ENDIAN = 2, ++ AAUDIO_FORMAT_FLAG_SIGNED = 4, ++ AAUDIO_FORMAT_FLAG_PACKED = 8, ++ AAUDIO_FORMAT_FLAG_ALIGNED_HIGH = 16, ++ AAUDIO_FORMAT_FLAG_NON_INTERLEAVED = 32, ++ AAUDIO_FORMAT_FLAG_NON_MIXABLE = 64 ++}; ++ ++static inline u64 aaudio_double_to_u64(u64 d) +{ -+ u32 platform_default = PPT_PLATFORM_DEFAULT; -+ u32 cpu_default = PPT_CPU_LIMIT_DEFAULT; -+ u32 platform_max = PPT_PLATFORM_MAX; -+ u32 max_boost = NVIDIA_BOOST_MAX; -+ u32 cpu_max = PPT_CPU_LIMIT_MAX; -+ const char *product; ++ u8 sign = (u8) ((d >> 63) & 1); ++ s32 exp = (s32) ((d >> 52) & 0x7ff) - 1023; ++ u64 fr = d & ((1LL << 52) - 1); ++ if (sign || exp < 0) ++ return 0; ++ return (u64) ((1LL << exp) + (fr >> (52 - exp))); ++} + -+ /* -+ * ASUS product_name contains everything required, e.g, -+ * "ROG Flow X16 GV601VV_GV601VV_00185149B". -+ * The bulk of these defaults are gained from users reporting what -+ * ASUS Armoury Crate in Windows provides them. -+ * This should be turned in to a table eventually. 
-+ */ -+ product = dmi_get_system_info(DMI_PRODUCT_NAME); ++#endif //AAUDIO_DESCRIPTION_H +diff --git a/drivers/staging/apple-bce/audio/pcm.c b/drivers/staging/apple-bce/audio/pcm.c +new file mode 100644 +index 000000000000..1026e10a9ac5 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/pcm.c +@@ -0,0 +1,308 @@ ++#include "pcm.h" ++#include "audio.h" ++ ++static u64 aaudio_get_alsa_fmtbit(struct aaudio_apple_description *desc) ++{ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_FLOAT) { ++ if (desc->bits_per_channel == 32) { ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) ++ return SNDRV_PCM_FMTBIT_FLOAT_BE; ++ else ++ return SNDRV_PCM_FMTBIT_FLOAT_LE; ++ } else if (desc->bits_per_channel == 64) { ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) ++ return SNDRV_PCM_FMTBIT_FLOAT64_BE; ++ else ++ return SNDRV_PCM_FMTBIT_FLOAT64_LE; ++ } else { ++ pr_err("aaudio: unsupported bits per channel for float format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++#define DEFINE_BPC_OPTION(val, b) \ ++ case val: \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_BIG_ENDIAN) { \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) \ ++ return SNDRV_PCM_FMTBIT_S ## b ## BE; \ ++ else \ ++ return SNDRV_PCM_FMTBIT_U ## b ## BE; \ ++ } else { \ ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) \ ++ return SNDRV_PCM_FMTBIT_S ## b ## LE; \ ++ else \ ++ return SNDRV_PCM_FMTBIT_U ## b ## LE; \ ++ } ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_PACKED) { ++ switch (desc->bits_per_channel) { ++ case 8: ++ case 16: ++ case 32: ++ break; ++ DEFINE_BPC_OPTION(24, 24_3) ++ default: ++ pr_err("aaudio: unsupported bits per channel for packed format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_ALIGNED_HIGH) { ++ switch (desc->bits_per_channel) { ++ DEFINE_BPC_OPTION(24, 32_) ++ default: ++ pr_err("aaudio: unsupported bits per channel for high-aligned format: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++ } ++ switch (desc->bits_per_channel) { ++ case 8: ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_SIGNED) ++ return SNDRV_PCM_FMTBIT_S8; ++ else ++ return SNDRV_PCM_FMTBIT_U8; ++ DEFINE_BPC_OPTION(16, 16_) ++ DEFINE_BPC_OPTION(24, 24_) ++ DEFINE_BPC_OPTION(32, 32_) ++ default: ++ pr_err("aaudio: unsupported bits per channel: %u\n", desc->bits_per_channel); ++ return 0; ++ } ++} ++int aaudio_create_hw_info(struct aaudio_apple_description *desc, struct snd_pcm_hardware *alsa_hw, ++ size_t buf_size) ++{ ++ uint rate; ++ alsa_hw->info = (SNDRV_PCM_INFO_MMAP | ++ SNDRV_PCM_INFO_BLOCK_TRANSFER | ++ SNDRV_PCM_INFO_MMAP_VALID | ++ SNDRV_PCM_INFO_DOUBLE); ++ if (desc->format_flags & AAUDIO_FORMAT_FLAG_NON_MIXABLE) ++ pr_warn("aaudio: unsupported hw flag: NON_MIXABLE\n"); ++ if (!(desc->format_flags & AAUDIO_FORMAT_FLAG_NON_INTERLEAVED)) ++ alsa_hw->info |= SNDRV_PCM_INFO_INTERLEAVED; ++ alsa_hw->formats = aaudio_get_alsa_fmtbit(desc); ++ if (!alsa_hw->formats) ++ return -EINVAL; ++ rate = (uint) aaudio_double_to_u64(desc->sample_rate_double); ++ alsa_hw->rates = snd_pcm_rate_to_rate_bit(rate); ++ alsa_hw->rate_min = rate; ++ alsa_hw->rate_max = rate; ++ alsa_hw->channels_min = desc->channels_per_frame; ++ alsa_hw->channels_max = desc->channels_per_frame; ++ alsa_hw->buffer_bytes_max = buf_size; ++ alsa_hw->period_bytes_min = desc->bytes_per_packet; ++ alsa_hw->period_bytes_max = desc->bytes_per_packet; ++ alsa_hw->periods_min = (uint) (buf_size / desc->bytes_per_packet); ++ alsa_hw->periods_max = (uint) (buf_size / 
desc->bytes_per_packet); ++ pr_debug("aaudio_create_hw_info: format = %llu, rate = %u/%u. channels = %u, periods = %u, period size = %lu\n", ++ alsa_hw->formats, alsa_hw->rate_min, alsa_hw->rates, alsa_hw->channels_min, alsa_hw->periods_min, ++ alsa_hw->period_bytes_min); ++ return 0; ++} + -+ if (strstr(product, "GA402R")) { -+ cpu_default = 125; -+ } else if (strstr(product, "13QY")) { -+ cpu_max = 250; -+ } else if (strstr(product, "X13")) { -+ cpu_max = 75; -+ cpu_default = 50; -+ } else if (strstr(product, "RC71") || strstr(product, "RC72")) { -+ cpu_max = 50; -+ cpu_default = 30; -+ } else if (strstr(product, "G814") || strstr(product, "G614") || -+ strstr(product, "G834") || strstr(product, "G634")) { -+ cpu_max = 175; -+ } else if (strstr(product, "GA402X") || strstr(product, "GA403") || -+ strstr(product, "FA507N") || strstr(product, "FA507X") || -+ strstr(product, "FA707N") || strstr(product, "FA707X")) { -+ cpu_max = 90; -+ } else { -+ pr_notice("Using default CPU limits. Please report if these are not correct.\n"); -+ } ++static struct aaudio_stream *aaudio_pcm_stream(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ return &sdev->out_streams[substream->number]; ++ else ++ return &sdev->in_streams[substream->number]; ++} + -+ if (strstr(product, "GZ301ZE")) -+ max_boost = 5; -+ else if (strstr(product, "FX507ZC4")) -+ max_boost = 15; -+ else if (strstr(product, "GU605")) -+ max_boost = 20; ++static int aaudio_pcm_open(struct snd_pcm_substream *substream) ++{ ++ pr_debug("aaudio_pcm_open\n"); ++ substream->runtime->hw = *aaudio_pcm_stream(substream)->alsa_hw_desc; + -+ /* ensure defaults for tunables */ -+ rog->cpu_default = cpu_default; -+ rog->cpu_min = PPT_CPU_LIMIT_MIN; -+ rog->cpu_max = cpu_max; ++ return 0; ++} + -+ rog->platform_default = platform_default; -+ rog->platform_max = PPT_PLATFORM_MIN; -+ rog->platform_max = platform_max; ++static int aaudio_pcm_close(struct snd_pcm_substream *substream) ++{ ++ pr_debug("aaudio_pcm_close\n"); ++ return 0; ++} + -+ rog->ppt_pl1_spl = cpu_default; -+ rog->ppt_pl2_sppt = cpu_default; -+ rog->ppt_pl3_fppt = cpu_default; -+ rog->ppt_apu_sppt = cpu_default; -+ rog->ppt_platform_sppt = platform_default; ++static int aaudio_pcm_prepare(struct snd_pcm_substream *substream) ++{ ++ return 0; ++} + -+ rog->nv_boost_default = NVIDIA_BOOST_MAX; -+ rog->nv_boost_min = NVIDIA_BOOST_MIN; -+ rog->nv_boost_max = max_boost; -+ rog->nv_dynamic_boost = NVIDIA_BOOST_MIN; ++static int aaudio_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) ++{ ++ struct aaudio_stream *astream = aaudio_pcm_stream(substream); ++ pr_debug("aaudio_pcm_hw_params\n"); + -+ rog->nv_temp_default = NVIDIA_TEMP_MAX; -+ rog->nv_temp_min = NVIDIA_TEMP_MIN; -+ rog->nv_temp_max = NVIDIA_TEMP_MAX; -+ rog->nv_temp_target = NVIDIA_TEMP_MIN; ++ if (!astream->buffer_cnt || !astream->buffers) ++ return -EINVAL; + -+ rog->dgpu_tgp_default = NVIDIA_POWER_DEFAULT; -+ rog->dgpu_tgp_min = NVIDIA_POWER_MIN; -+ rog->dgpu_tgp_max = NVIDIA_POWER_MAX; -+ rog->dgpu_tgp = NVIDIA_POWER_MAX; ++ substream->runtime->dma_area = astream->buffers[0].ptr; ++ substream->runtime->dma_addr = astream->buffers[0].dma_addr; ++ substream->runtime->dma_bytes = astream->buffers[0].size; ++ return 0; +} + -+static int __init asus_fw_init(void) ++static int aaudio_pcm_hw_free(struct snd_pcm_substream *substream) +{ -+ char *wmi_uid; -+ int err; ++ 
pr_debug("aaudio_pcm_hw_free\n"); ++ return 0; ++} + -+ wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID); -+ if (!wmi_uid) -+ return -ENODEV; ++static void aaudio_pcm_start(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ void *buf; ++ size_t s; ++ ktime_t time_start, time_end; ++ bool back_buffer; ++ time_start = ktime_get(); + -+ /* -+ * if equal to "ASUSWMI" then it's DCTS that can't be used for this -+ * driver, DSTS is required. -+ */ -+ if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI)) -+ return -ENODEV; ++ back_buffer = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); + -+ asus_armoury.rog_tunables = kzalloc(sizeof(struct rog_tunables), GFP_KERNEL); -+ if (!asus_armoury.rog_tunables) -+ return -ENOMEM; ++ if (back_buffer) { ++ s = frames_to_bytes(substream->runtime, substream->runtime->control->appl_ptr); ++ buf = kmalloc(s, GFP_KERNEL); ++ memcpy_fromio(buf, substream->runtime->dma_area, s); ++ time_end = ktime_get(); ++ pr_debug("aaudio: Backed up the buffer in %lluns [%li]\n", ktime_to_ns(time_end - time_start), ++ substream->runtime->control->appl_ptr); ++ } + -+ init_rog_tunables(asus_armoury.rog_tunables); -+ if (asus_wmi_is_present(ASUS_WMI_DEVID_CORES_MAX)) { -+ err = init_max_cpu_cores(); -+ if (err) { -+ kfree(asus_armoury.rog_tunables); -+ pr_err("Could not initialise CPU core control %d\n", err); -+ return err; -+ } -+ } ++ stream->waiting_for_first_ts = true; ++ stream->frame_min = stream->latency; + -+ err = asus_fw_attr_add(); -+ if (err) -+ return err; ++ aaudio_cmd_start_io(sdev->a, sdev->dev_id); ++ if (back_buffer) ++ memcpy_toio(substream->runtime->dma_area, buf, s); + -+ return 0; ++ time_end = ktime_get(); ++ pr_debug("aaudio: Started the audio device in %lluns\n", ktime_to_ns(time_end - time_start)); +} + -+static void __exit asus_fw_exit(void) ++static int aaudio_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ -+ mutex_lock(&asus_armoury.mutex); -+ -+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); -+ kset_unregister(asus_armoury.fw_attr_kset); -+ device_destroy(fw_attr_class, MKDEV(0, 0)); -+ fw_attributes_class_put(); ++ struct aaudio_subdevice *sdev = snd_pcm_substream_chip(substream); ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ pr_debug("aaudio_pcm_trigger %x\n", cmd); + -+ mutex_unlock(&asus_armoury.mutex); ++ /* We only supports triggers on the #0 buffer */ ++ if (substream->number != 0) ++ return 0; ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ aaudio_pcm_start(substream); ++ stream->started = 1; ++ break; ++ case SNDRV_PCM_TRIGGER_STOP: ++ aaudio_cmd_stop_io(sdev->a, sdev->dev_id); ++ stream->started = 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ return 0; +} + -+module_init(asus_fw_init); -+module_exit(asus_fw_exit); ++static snd_pcm_uframes_t aaudio_pcm_pointer(struct snd_pcm_substream *substream) ++{ ++ struct aaudio_stream *stream = aaudio_pcm_stream(substream); ++ ktime_t time_from_start; ++ snd_pcm_sframes_t frames; ++ snd_pcm_sframes_t buffer_time_length; + -+MODULE_IMPORT_NS("ASUS_WMI"); -+MODULE_AUTHOR("Luke Jones "); -+MODULE_DESCRIPTION("ASUS BIOS Configuration Driver"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("wmi:" ASUS_NB_WMI_EVENT_GUID); -diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h -new file mode 100644 -index 000000000000..2620708d3994 ---- /dev/null -+++ 
b/drivers/platform/x86/asus-armoury.h -@@ -0,0 +1,258 @@ -+/* SPDX-License-Identifier: GPL-2.0 -+ * -+ * Definitions for kernel modules using asus-armoury driver -+ * -+ * Copyright (c) 2024 Luke Jones -+ */ ++ if (!stream->started || stream->waiting_for_first_ts) { ++ pr_warn("aaudio_pcm_pointer while not started\n"); ++ return 0; ++ } + -+#ifndef _ASUS_ARMOURY_H_ -+#define _ASUS_ARMOURY_H_ ++ /* Approximate the pointer based on the last received timestamp */ ++ time_from_start = ktime_get_boottime() - stream->remote_timestamp; ++ buffer_time_length = NSEC_PER_SEC * substream->runtime->buffer_size / substream->runtime->rate; ++ frames = (ktime_to_ns(time_from_start) % buffer_time_length) * substream->runtime->buffer_size / buffer_time_length; ++ if (ktime_to_ns(time_from_start) < buffer_time_length) { ++ if (frames < stream->frame_min) ++ frames = stream->frame_min; ++ else ++ stream->frame_min = 0; ++ } else { ++ if (ktime_to_ns(time_from_start) < 2 * buffer_time_length) ++ stream->frame_min = frames; ++ else ++ stream->frame_min = 0; /* Heavy desync */ ++ } ++ frames -= stream->latency; ++ if (frames < 0) ++ frames += ((-frames - 1) / substream->runtime->buffer_size + 1) * substream->runtime->buffer_size; ++ return (snd_pcm_uframes_t) frames; ++} ++ ++static struct snd_pcm_ops aaudio_pcm_ops = { ++ .open = aaudio_pcm_open, ++ .close = aaudio_pcm_close, ++ .ioctl = snd_pcm_lib_ioctl, ++ .hw_params = aaudio_pcm_hw_params, ++ .hw_free = aaudio_pcm_hw_free, ++ .prepare = aaudio_pcm_prepare, ++ .trigger = aaudio_pcm_trigger, ++ .pointer = aaudio_pcm_pointer, ++ .mmap = snd_pcm_lib_mmap_iomem ++}; + -+#include -+#include ++int aaudio_create_pcm(struct aaudio_subdevice *sdev) ++{ ++ struct snd_pcm *pcm; ++ struct aaudio_alsa_pcm_id_mapping *id_mapping; ++ int err; + -+#define DRIVER_NAME "asus-armoury" ++ if (!sdev->is_pcm || (sdev->in_stream_cnt == 0 && sdev->out_stream_cnt == 0)) { ++ return -EINVAL; ++ } + -+static ssize_t attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, -+ const char *buf, size_t count, u32 min, u32 max, -+ u32 *store_value, u32 wmi_dev); ++ for (id_mapping = aaudio_alsa_id_mappings; id_mapping->name; id_mapping++) { ++ if (!strcmp(sdev->uid, id_mapping->name)) { ++ sdev->alsa_id = id_mapping->alsa_id; ++ break; ++ } ++ } ++ if (!id_mapping->name) ++ sdev->alsa_id = sdev->a->next_alsa_id++; ++ err = snd_pcm_new(sdev->a->card, sdev->uid, sdev->alsa_id, ++ (int) sdev->out_stream_cnt, (int) sdev->in_stream_cnt, &pcm); ++ if (err < 0) ++ return err; ++ pcm->private_data = sdev; ++ pcm->nonatomic = 1; ++ sdev->pcm = pcm; ++ strcpy(pcm->name, sdev->uid); ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &aaudio_pcm_ops); ++ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &aaudio_pcm_ops); ++ return 0; ++} + -+static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) ++static void aaudio_handle_stream_timestamp(struct snd_pcm_substream *substream, ktime_t timestamp) +{ -+ return sysfs_emit(buf, "integer\n"); ++ unsigned long flags; ++ struct aaudio_stream *stream; ++ ++ stream = aaudio_pcm_stream(substream); ++ snd_pcm_stream_lock_irqsave(substream, flags); ++ stream->remote_timestamp = timestamp; ++ if (stream->waiting_for_first_ts) { ++ stream->waiting_for_first_ts = false; ++ snd_pcm_stream_unlock_irqrestore(substream, flags); ++ return; ++ } ++ snd_pcm_stream_unlock_irqrestore(substream, flags); ++ snd_pcm_period_elapsed(substream); +} + -+static ssize_t enum_type_show(struct kobject *kobj, struct kobj_attribute *attr, -+ 
char *buf) ++void aaudio_handle_timestamp(struct aaudio_subdevice *sdev, ktime_t os_timestamp, u64 dev_timestamp) +{ -+ return sysfs_emit(buf, "enumeration\n"); ++ struct snd_pcm_substream *substream; ++ ++ substream = sdev->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream; ++ if (substream) ++ aaudio_handle_stream_timestamp(substream, dev_timestamp); ++ substream = sdev->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; ++ if (substream) ++ aaudio_handle_stream_timestamp(substream, os_timestamp); +} +diff --git a/drivers/staging/apple-bce/audio/pcm.h b/drivers/staging/apple-bce/audio/pcm.h +new file mode 100644 +index 000000000000..ea5f35fbe408 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/pcm.h +@@ -0,0 +1,16 @@ ++#ifndef AAUDIO_PCM_H ++#define AAUDIO_PCM_H + -+#define __ASUS_ATTR_RO(_func, _name) \ -+ { \ -+ .attr = { .name = __stringify(_name), .mode = 0444 }, \ -+ .show = _func##_##_name##_show, \ -+ } ++#include ++#include + -+#define __ASUS_ATTR_RO_AS(_name, _show) \ -+ { \ -+ .attr = { .name = __stringify(_name), .mode = 0444 }, \ -+ .show = _show, \ -+ } ++struct aaudio_subdevice; ++struct aaudio_apple_description; ++struct snd_pcm_hardware; + -+#define __ASUS_ATTR_RW(_func, _name) \ -+ __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store) ++int aaudio_create_hw_info(struct aaudio_apple_description *desc, struct snd_pcm_hardware *alsa_hw, size_t buf_size); ++int aaudio_create_pcm(struct aaudio_subdevice *sdev); + -+#define __WMI_STORE_INT(_attr, _min, _max, _wmi) \ -+ static ssize_t _attr##_store(struct kobject *kobj, \ -+ struct kobj_attribute *attr, \ -+ const char *buf, size_t count) \ -+ { \ -+ return attr_uint_store(kobj, attr, buf, count, _min, _max, \ -+ NULL, _wmi); \ -+ } ++void aaudio_handle_timestamp(struct aaudio_subdevice *sdev, ktime_t os_timestamp, u64 dev_timestamp); + -+#define WMI_SHOW_INT(_attr, _fmt, _wmi) \ -+ static ssize_t _attr##_show(struct kobject *kobj, \ -+ struct kobj_attribute *attr, char *buf) \ -+ { \ -+ u32 result; \ -+ int err; \ -+ \ -+ err = asus_wmi_get_devstate_dsts(_wmi, &result); \ -+ if (err) \ -+ return err; \ -+ return sysfs_emit(buf, _fmt, \ -+ result & ~ASUS_WMI_DSTS_PRESENCE_BIT); \ -+ } ++#endif //AAUDIO_PCM_H +diff --git a/drivers/staging/apple-bce/audio/protocol.c b/drivers/staging/apple-bce/audio/protocol.c +new file mode 100644 +index 000000000000..2314813aeead +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol.c +@@ -0,0 +1,347 @@ ++#include "protocol.h" ++#include "protocol_bce.h" ++#include "audio.h" + -+/* Create functions and attributes for use in other macros or on their own */ ++int aaudio_msg_read_base(struct aaudio_msg *msg, struct aaudio_msg_base *base) ++{ ++ if (msg->size < sizeof(struct aaudio_msg_header) + sizeof(struct aaudio_msg_base) * 2) ++ return -EINVAL; ++ *base = *((struct aaudio_msg_base *) ((struct aaudio_msg_header *) msg->data + 1)); ++ return 0; ++} + -+#define __ATTR_CURRENT_INT_RO(_attr, _wmi) \ -+ WMI_SHOW_INT(_attr##_current_value, "%d\n", _wmi); \ -+ static struct kobj_attribute attr_##_attr##_current_value = \ -+ __ASUS_ATTR_RO(_attr, current_value) ++#define READ_START(type) \ ++ size_t offset = sizeof(struct aaudio_msg_header) + sizeof(struct aaudio_msg_base); (void)offset; \ ++ if (((struct aaudio_msg_base *) ((struct aaudio_msg_header *) msg->data + 1))->msg != type) \ ++ return -EINVAL; ++#define READ_DEVID_VAR(devid) *devid = ((struct aaudio_msg_header *) msg->data)->device_id ++#define READ_VAL(type) ({ offset += sizeof(type); *((type *) ((u8 *) msg->data 
+ offset - sizeof(type))); }) ++#define READ_VAR(type, var) *var = READ_VAL(type) + -+#define __ATTR_CURRENT_INT_RW(_attr, _minv, _maxv, _wmi) \ -+ __WMI_STORE_INT(_attr##_current_value, _minv, _maxv, _wmi); \ -+ WMI_SHOW_INT(_attr##_current_value, "%d\n", _wmi); \ -+ static struct kobj_attribute attr_##_attr##_current_value = \ -+ __ASUS_ATTR_RW(_attr, current_value) ++int aaudio_msg_read_start_io_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_START_IO_RESPONSE); ++ return 0; ++} + -+/* Shows a formatted static variable */ -+#define __ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \ -+ static ssize_t _attrname##_##_prop##_show( \ -+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ -+ { \ -+ return sysfs_emit(buf, _fmt, _val); \ -+ } \ -+ static struct kobj_attribute attr_##_attrname##_##_prop = \ -+ __ASUS_ATTR_RO(_attrname, _prop) ++int aaudio_msg_read_stop_io_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_STOP_IO_RESPONSE); ++ return 0; ++} + -+/* Requires current_value_show */ -+#define __ATTR_GROUP_INT_VALUE_ONLY(_attrname, _fsname, _dispname) \ -+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ -+ static struct kobj_attribute attr_##_attrname##_type = \ -+ __ASUS_ATTR_RO_AS(type, int_type_show); \ -+ static struct attribute *_attrname##_attrs[] = { \ -+ &attr_##_attrname##_current_value.attr, \ -+ &attr_##_attrname##_display_name.attr, \ -+ &attr_##_attrname##_type.attr, NULL \ -+ }; \ -+ static const struct attribute_group _attrname##_attr_group = { \ -+ .name = _fsname, .attrs = _attrname##_attrs \ -+ } ++int aaudio_msg_read_update_timestamp(struct aaudio_msg *msg, aaudio_device_id_t *devid, ++ u64 *timestamp, u64 *update_seed) ++{ ++ READ_START(AAUDIO_MSG_UPDATE_TIMESTAMP); ++ READ_DEVID_VAR(devid); ++ READ_VAR(u64, timestamp); ++ READ_VAR(u64, update_seed); ++ return 0; ++} + -+/* Boolean style enumeration, base macro. 
Requires adding show/store */ -+#define __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) \ -+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ -+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \ -+ static struct kobj_attribute attr_##_attrname##_type = \ -+ __ASUS_ATTR_RO_AS(type, enum_type_show); \ -+ static struct attribute *_attrname##_attrs[] = { \ -+ &attr_##_attrname##_current_value.attr, \ -+ &attr_##_attrname##_display_name.attr, \ -+ &attr_##_attrname##_possible_values.attr, \ -+ &attr_##_attrname##_type.attr, \ -+ NULL \ -+ }; \ -+ static const struct attribute_group _attrname##_attr_group = { \ -+ .name = _fsname, .attrs = _attrname##_attrs \ -+ } ++int aaudio_msg_read_get_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop, void **data, u64 *data_size) ++{ ++ READ_START(AAUDIO_MSG_GET_PROPERTY_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ READ_VAR(u64, data_size); ++ *data = ((u8 *) msg->data + offset); ++ /* offset += data_size; */ ++ return 0; ++} + -+#define ATTR_GROUP_INT_VALUE_ONLY_RO(_attrname, _fsname, _wmi, _dispname) \ -+ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ -+ __ATTR_GROUP_INT_VALUE_ONLY(_attrname, _fsname, _dispname) ++int aaudio_msg_read_set_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj) ++{ ++ READ_START(AAUDIO_MSG_SET_PROPERTY_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ return 0; ++} + -+#define ATTR_GROUP_BOOL_RO(_attrname, _fsname, _wmi, _dispname) \ -+ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ -+ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) ++int aaudio_msg_read_property_listener_response(struct aaudio_msg *msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop) ++{ ++ READ_START(AAUDIO_MSG_PROPERTY_LISTENER_RESPONSE); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ return 0; ++} + -+#define ATTR_GROUP_BOOL_RW(_attrname, _fsname, _wmi, _dispname) \ -+ __ATTR_CURRENT_INT_RW(_attrname, 0, 1, _wmi); \ -+ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) ++int aaudio_msg_read_property_changed(struct aaudio_msg *msg, aaudio_device_id_t *devid, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop) ++{ ++ READ_START(AAUDIO_MSG_PROPERTY_CHANGED); ++ READ_DEVID_VAR(devid); ++ READ_VAR(aaudio_object_id_t, obj); ++ READ_VAR(u32, &prop->element); ++ READ_VAR(u32, &prop->scope); ++ READ_VAR(u32, &prop->selector); ++ return 0; ++} + -+/* -+ * Requires _current_value_show(), _current_value_show() -+ */ -+#define ATTR_GROUP_BOOL_CUSTOM(_attrname, _fsname, _dispname) \ -+ static struct kobj_attribute attr_##_attrname##_current_value = \ -+ __ASUS_ATTR_RW(_attrname, current_value); \ -+ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) ++int aaudio_msg_read_set_input_stream_address_ranges_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES_RESPONSE); ++ return 0; ++} + -+#define ATTR_GROUP_ENUM_INT_RO(_attrname, _fsname, _wmi, _possible, _dispname) \ -+ __ATTR_CURRENT_INT_RO(_attrname, _wmi); \ -+ __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) ++int aaudio_msg_read_get_input_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_INPUT_STREAM_LIST_RESPONSE); ++ READ_VAR(u64, str_cnt); ++ *str_l = 
(aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += str_cnt * sizeof(aaudio_object_id_t); */ ++ return 0; ++} + -+/* -+ * Requires _current_value_show(), _current_value_show() -+ * and _possible_values_show() -+ */ -+#define ATTR_GROUP_ENUM_CUSTOM(_attrname, _fsname, _dispname) \ -+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ -+ static struct kobj_attribute attr_##_attrname##_current_value = \ -+ __ASUS_ATTR_RW(_attrname, current_value); \ -+ static struct kobj_attribute attr_##_attrname##_possible_values = \ -+ __ASUS_ATTR_RO(_attrname, possible_values); \ -+ static struct kobj_attribute attr_##_attrname##_type = \ -+ __ASUS_ATTR_RO_AS(type, enum_type_show); \ -+ static struct attribute *_attrname##_attrs[] = { \ -+ &attr_##_attrname##_current_value.attr, \ -+ &attr_##_attrname##_display_name.attr, \ -+ &attr_##_attrname##_possible_values.attr, \ -+ &attr_##_attrname##_type.attr, \ -+ NULL \ -+ }; \ -+ static const struct attribute_group _attrname##_attr_group = { \ -+ .name = _fsname, .attrs = _attrname##_attrs \ -+ } ++int aaudio_msg_read_get_output_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_OUTPUT_STREAM_LIST_RESPONSE); ++ READ_VAR(u64, str_cnt); ++ *str_l = (aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += str_cnt * sizeof(aaudio_object_id_t); */ ++ return 0; ++} + -+/* CPU core attributes need a little different in setup */ -+#define ATTR_GROUP_CORES_RW(_attrname, _fsname, _dispname) \ -+ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", 1); \ -+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ -+ static struct kobj_attribute attr_##_attrname##_current_value = \ -+ __ASUS_ATTR_RW(_attrname, current_value); \ -+ static struct kobj_attribute attr_##_attrname##_default_value = \ -+ __ASUS_ATTR_RO(_attrname, default_value); \ -+ static struct kobj_attribute attr_##_attrname##_min_value = \ -+ __ASUS_ATTR_RO(_attrname, min_value); \ -+ static struct kobj_attribute attr_##_attrname##_max_value = \ -+ __ASUS_ATTR_RO(_attrname, max_value); \ -+ static struct kobj_attribute attr_##_attrname##_type = \ -+ __ASUS_ATTR_RO_AS(type, int_type_show); \ -+ static struct attribute *_attrname##_attrs[] = { \ -+ &attr_##_attrname##_current_value.attr, \ -+ &attr_##_attrname##_default_value.attr, \ -+ &attr_##_attrname##_min_value.attr, \ -+ &attr_##_attrname##_max_value.attr, \ -+ &attr_##_attrname##_scalar_increment.attr, \ -+ &attr_##_attrname##_display_name.attr, \ -+ &attr_##_attrname##_type.attr, \ -+ NULL \ -+ }; \ -+ static const struct attribute_group _attrname##_attr_group = { \ -+ .name = _fsname, .attrs = _attrname##_attrs \ -+ } ++int aaudio_msg_read_set_remote_access_response(struct aaudio_msg *msg) ++{ ++ READ_START(AAUDIO_MSG_SET_REMOTE_ACCESS_RESPONSE); ++ return 0; ++} + -+/* -+ * ROG PPT attributes need a little different in setup as they -+ * require rog_tunables members. 
-+ */ ++int aaudio_msg_read_get_device_list_response(struct aaudio_msg *msg, aaudio_device_id_t **dev_l, u64 *dev_cnt) ++{ ++ READ_START(AAUDIO_MSG_GET_DEVICE_LIST_RESPONSE); ++ READ_VAR(u64, dev_cnt); ++ *dev_l = (aaudio_device_id_t *) ((u8 *) msg->data + offset); ++ /* offset += dev_cnt * sizeof(aaudio_device_id_t); */ ++ return 0; ++} + -+#define __ROG_TUNABLE_RW(_attr, _min, _max, _wmi) \ -+ static ssize_t _attr##_current_value_store( \ -+ struct kobject *kobj, struct kobj_attribute *attr, \ -+ const char *buf, size_t count) \ -+ { \ -+ return attr_uint_store(kobj, attr, buf, count, \ -+ asus_armoury.rog_tunables->_min, \ -+ asus_armoury.rog_tunables->_max, \ -+ &asus_armoury.rog_tunables->_attr, \ -+ _wmi); \ -+ } \ -+ static ssize_t _attr##_current_value_show( \ -+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ -+ { \ -+ return sysfs_emit(buf, "%u\n", \ -+ asus_armoury.rog_tunables->_attr); \ -+ } \ -+ static struct kobj_attribute attr_##_attr##_current_value = \ -+ __ASUS_ATTR_RW(_attr, current_value) ++#define WRITE_START_OF_TYPE(typev, devid) \ ++ size_t offset = sizeof(struct aaudio_msg_header); (void) offset; \ ++ ((struct aaudio_msg_header *) msg->data)->type = (typev); \ ++ ((struct aaudio_msg_header *) msg->data)->device_id = (devid); ++#define WRITE_START_COMMAND(devid) WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_COMMAND, devid) ++#define WRITE_START_RESPONSE() WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_RESPONSE, 0) ++#define WRITE_START_NOTIFICATION() WRITE_START_OF_TYPE(AAUDIO_MSG_TYPE_NOTIFICATION, 0) ++#define WRITE_VAL(type, value) { *((type *) ((u8 *) msg->data + offset)) = value; offset += sizeof(value); } ++#define WRITE_BIN(value, size) { memcpy((u8 *) msg->data + offset, value, size); offset += size; } ++#define WRITE_BASE(type) WRITE_VAL(u32, type) WRITE_VAL(u32, 0) ++#define WRITE_END() { msg->size = offset; } + -+#define __ROG_TUNABLE_SHOW(_prop, _attrname, _val) \ -+ static ssize_t _attrname##_##_prop##_show( \ -+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ -+ { \ -+ return sysfs_emit(buf, "%d\n", \ -+ asus_armoury.rog_tunables->_val); \ -+ } \ -+ static struct kobj_attribute attr_##_attrname##_##_prop = \ -+ __ASUS_ATTR_RO(_attrname, _prop) ++void aaudio_msg_write_start_io(struct aaudio_msg *msg, aaudio_device_id_t dev) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_START_IO); ++ WRITE_END(); ++} + -+#define ATTR_GROUP_ROG_TUNABLE(_attrname, _fsname, _wmi, _default, _min, _max, \ -+ _incstep, _dispname) \ -+ __ROG_TUNABLE_SHOW(default_value, _attrname, _default); \ -+ __ROG_TUNABLE_RW(_attrname, _min, _max, _wmi); \ -+ __ROG_TUNABLE_SHOW(min_value, _attrname, _min); \ -+ __ROG_TUNABLE_SHOW(max_value, _attrname, _max); \ -+ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", _incstep); \ -+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ -+ static struct kobj_attribute attr_##_attrname##_type = \ -+ __ASUS_ATTR_RO_AS(type, int_type_show); \ -+ static struct attribute *_attrname##_attrs[] = { \ -+ &attr_##_attrname##_current_value.attr, \ -+ &attr_##_attrname##_default_value.attr, \ -+ &attr_##_attrname##_min_value.attr, \ -+ &attr_##_attrname##_max_value.attr, \ -+ &attr_##_attrname##_scalar_increment.attr, \ -+ &attr_##_attrname##_display_name.attr, \ -+ &attr_##_attrname##_type.attr, \ -+ NULL \ -+ }; \ -+ static const struct attribute_group _attrname##_attr_group = { \ -+ .name = _fsname, .attrs = _attrname##_attrs \ -+ } ++void aaudio_msg_write_stop_io(struct aaudio_msg *msg, aaudio_device_id_t dev) ++{ ++ 
WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_STOP_IO); ++ WRITE_END(); ++} + -+#endif /* _ASUS_BIOSCFG_H_ */ -diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c -index 8bd187e8b47f..6bd89cd0acdf 100644 ---- a/drivers/platform/x86/asus-wmi.c -+++ b/drivers/platform/x86/asus-wmi.c -@@ -55,8 +55,6 @@ module_param(fnlock_default, bool, 0444); - #define to_asus_wmi_driver(pdrv) \ - (container_of((pdrv), struct asus_wmi_driver, platform_driver)) - --#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" -- - #define NOTIFY_BRNUP_MIN 0x11 - #define NOTIFY_BRNUP_MAX 0x1f - #define NOTIFY_BRNDOWN_MIN 0x20 -@@ -105,8 +103,6 @@ module_param(fnlock_default, bool, 0444); - #define USB_INTEL_XUSB2PR 0xD0 - #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 - --#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI" -- - #define WMI_EVENT_MASK 0xFFFF - - #define FAN_CURVE_POINTS 8 -@@ -142,16 +138,20 @@ module_param(fnlock_default, bool, 0444); - #define ASUS_MINI_LED_2024_STRONG 0x01 - #define ASUS_MINI_LED_2024_OFF 0x02 - --/* Controls the power state of the USB0 hub on ROG Ally which input is on */ - #define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE" --/* 300ms so far seems to produce a reliable result on AC and battery */ --#define ASUS_USB0_PWR_EC0_CSEE_WAIT 1500 -+/* -+ * The period required to wait after screen off/on/s2idle.check in MS. -+ * Time here greatly impacts the wake behaviour. Used in suspend/wake. -+ */ -+#define ASUS_USB0_PWR_EC0_CSEE_WAIT 600 -+#define ASUS_USB0_PWR_EC0_CSEE_OFF 0xB7 -+#define ASUS_USB0_PWR_EC0_CSEE_ON 0xB8 - - static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL }; - - static int throttle_thermal_policy_write(struct asus_wmi *); - --static const struct dmi_system_id asus_ally_mcu_quirk[] = { -+static const struct dmi_system_id asus_rog_ally_device[] = { - { - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "RC71L"), -@@ -274,9 +274,6 @@ struct asus_wmi { - u32 tablet_switch_dev_id; - bool tablet_switch_inverted; - -- /* The ROG Ally device requires the MCU USB device be disconnected before suspend */ -- bool ally_mcu_usb_switch; -- - enum fan_type fan_type; - enum fan_type gpu_fan_type; - enum fan_type mid_fan_type; -@@ -289,11 +286,12 @@ struct asus_wmi { - u8 fan_boost_mode_mask; - u8 fan_boost_mode; - ++void aaudio_msg_write_get_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_GET_PROPERTY); ++ WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_VAL(u64, qualifier_size); ++ WRITE_BIN(qualifier, qualifier_size); ++ WRITE_END(); ++} + -+ /* Tunables provided by ASUS for gaming laptops */ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - bool egpu_enable_available; - bool dgpu_disable_available; - u32 gpu_mux_dev; -- -- /* Tunables provided by ASUS for gaming laptops */ - u32 ppt_pl2_sppt; - u32 ppt_pl1_spl; - u32 ppt_apu_sppt; -@@ -301,6 +299,9 @@ struct asus_wmi { - u32 ppt_fppt; - u32 nv_dynamic_boost; - u32 nv_temp_target; -+ bool panel_overdrive_available; -+ u32 mini_led_dev_id; -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - u32 kbd_rgb_dev; - bool kbd_rgb_state_available; -@@ -319,9 +320,6 @@ struct asus_wmi { - // The RSOC controls the maximum charging percentage. 
- bool battery_rsoc_available; - -- bool panel_overdrive_available; -- u32 mini_led_dev_id; -- - struct hotplug_slot hotplug_slot; - struct mutex hotplug_lock; - struct mutex wmi_lock; -@@ -335,6 +333,17 @@ struct asus_wmi { - struct asus_wmi_driver *driver; - }; - -+static bool ally_mcu_usb_plug; ++void aaudio_msg_write_set_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *data, u64 data_size, void *qualifier, u64 qualifier_size) ++{ ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_SET_PROPERTY); ++ WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_VAL(u64, data_size); ++ WRITE_BIN(data, data_size); ++ WRITE_VAL(u64, qualifier_size); ++ WRITE_BIN(qualifier, qualifier_size); ++ WRITE_END(); ++} + -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) -+static void asus_wmi_show_deprecated(void) ++void aaudio_msg_write_property_listener(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop) +{ -+ pr_notice_once("Accessing attributes through /sys/bus/platform/asus_wmi " -+ "is deprecated and will be removed in a future release. Please " -+ "switch over to /sys/class/firmware_attributes.\n"); ++ WRITE_START_COMMAND(dev); ++ WRITE_BASE(AAUDIO_MSG_PROPERTY_LISTENER); ++ WRITE_VAL(aaudio_object_id_t, obj); ++ WRITE_VAL(u32, prop.element); ++ WRITE_VAL(u32, prop.scope); ++ WRITE_VAL(u32, prop.selector); ++ WRITE_END(); +} -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + - /* WMI ************************************************************************/ - - static int asus_wmi_evaluate_method3(u32 method_id, -@@ -385,7 +394,7 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) - { - return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval); - } --EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method); -+EXPORT_SYMBOL_NS_GPL(asus_wmi_evaluate_method, "ASUS_WMI"); - - static int asus_wmi_evaluate_method5(u32 method_id, - u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval) -@@ -549,12 +558,50 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) - return 0; - } - --static int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, -- u32 *retval) -+/** -+ * asus_wmi_get_devstate_dsts() - Get the WMI function state. -+ * @dev_id: The WMI method ID to call. -+ * @retval: A pointer to where to store the value returned from WMI. -+ * -+ * On success the return value is 0, and the retval is a valid value returned -+ * by the successful WMI function call otherwise an error is returned if the -+ * call failed, or if the WMI method ID is unsupported. 
-+ */ -+int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval) ++void aaudio_msg_write_set_input_stream_address_ranges(struct aaudio_msg *msg, aaudio_device_id_t devid) +{ -+ int err; ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES); ++ WRITE_END(); ++} + -+ err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, retval); -+ if (err) -+ return err; ++void aaudio_msg_write_get_input_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid) ++{ ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_GET_INPUT_STREAM_LIST); ++ WRITE_END(); ++} + -+ if (*retval == ASUS_WMI_UNSUPPORTED_METHOD) -+ return -ENODEV; ++void aaudio_msg_write_get_output_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid) ++{ ++ WRITE_START_COMMAND(devid); ++ WRITE_BASE(AAUDIO_MSG_GET_OUTPUT_STREAM_LIST); ++ WRITE_END(); ++} + -+ return 0; ++void aaudio_msg_write_set_remote_access(struct aaudio_msg *msg, u64 mode) ++{ ++ WRITE_START_COMMAND(0); ++ WRITE_BASE(AAUDIO_MSG_SET_REMOTE_ACCESS); ++ WRITE_VAL(u64, mode); ++ WRITE_END(); +} -+EXPORT_SYMBOL_NS_GPL(asus_wmi_get_devstate_dsts, "ASUS_WMI"); + -+/** -+ * asus_wmi_set_devstate() - Set the WMI function state. -+ * @dev_id: The WMI function to call. -+ * @ctrl_param: The argument to be used for this WMI function. -+ * @retval: A pointer to where to store the value returned from WMI. -+ * -+ * The returned WMI function state if not checked here for error as -+ * asus_wmi_set_devstate() is not called unless first paired with a call to -+ * asus_wmi_get_devstate_dsts() to check that the WMI function is supported. -+ * -+ * On success the return value is 0, and the retval is a valid value returned -+ * by the successful WMI function call. An error value is returned only if the -+ * WMI function failed. 
-+ */ -+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval) - { - return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, - ctrl_param, retval); - } -+EXPORT_SYMBOL_NS_GPL(asus_wmi_set_devstate, "ASUS_WMI"); - - /* Helper for special devices with magic return codes */ - static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, -@@ -687,6 +734,7 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus) - } - - /* Charging mode, 1=Barrel, 2=USB ******************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t charge_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -697,12 +745,16 @@ static ssize_t charge_mode_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++void aaudio_msg_write_alive_notification(struct aaudio_msg *msg, u32 proto_ver, u32 msg_ver) ++{ ++ WRITE_START_NOTIFICATION(); ++ WRITE_BASE(AAUDIO_MSG_NOTIFICATION_ALIVE); ++ WRITE_VAL(u32, proto_ver); ++ WRITE_VAL(u32, msg_ver); ++ WRITE_END(); ++} + - return sysfs_emit(buf, "%d\n", value & 0xff); - } - - static DEVICE_ATTR_RO(charge_mode); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* dGPU ********************************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t dgpu_disable_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -713,6 +765,8 @@ static ssize_t dgpu_disable_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++void aaudio_msg_write_update_timestamp_response(struct aaudio_msg *msg) ++{ ++ WRITE_START_RESPONSE(); ++ WRITE_BASE(AAUDIO_MSG_UPDATE_TIMESTAMP_RESPONSE); ++ WRITE_END(); ++} + - return sysfs_emit(buf, "%d\n", result); - } - -@@ -766,8 +820,10 @@ static ssize_t dgpu_disable_store(struct device *dev, - return count; - } - static DEVICE_ATTR_RW(dgpu_disable); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* eGPU ********************************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t egpu_enable_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -778,6 +834,8 @@ static ssize_t egpu_enable_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++void aaudio_msg_write_get_device_list(struct aaudio_msg *msg) ++{ ++ WRITE_START_COMMAND(0); ++ WRITE_BASE(AAUDIO_MSG_GET_DEVICE_LIST); ++ WRITE_END(); ++} + - return sysfs_emit(buf, "%d\n", result); - } - -@@ -834,8 +892,10 @@ static ssize_t egpu_enable_store(struct device *dev, - return count; - } - static DEVICE_ATTR_RW(egpu_enable); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Is eGPU connected? *********************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t egpu_connected_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -846,12 +906,16 @@ static ssize_t egpu_connected_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++#define CMD_SHARED_VARS_NO_REPLY \ ++ int status = 0; \ ++ struct aaudio_send_ctx sctx; ++#define CMD_SHARED_VARS \ ++ CMD_SHARED_VARS_NO_REPLY \ ++ struct aaudio_msg reply = aaudio_reply_alloc(); \ ++ struct aaudio_msg *buf = &reply; ++#define CMD_SEND_REQUEST(fn, ...) 
\ ++ if ((status = aaudio_send_cmd_sync(a, &sctx, buf, 500, fn, ##__VA_ARGS__))) \ ++ return status; ++#define CMD_DEF_SHARED_AND_SEND(fn, ...) \ ++ CMD_SHARED_VARS \ ++ CMD_SEND_REQUEST(fn, ##__VA_ARGS__); ++#define CMD_DEF_SHARED_NO_REPLY_AND_SEND(fn, ...) \ ++ CMD_SHARED_VARS_NO_REPLY \ ++ CMD_SEND_REQUEST(fn, ##__VA_ARGS__); ++#define CMD_HNDL_REPLY_NO_FREE(fn, ...) \ ++ status = fn(buf, ##__VA_ARGS__); \ ++ return status; ++#define CMD_HNDL_REPLY_AND_FREE(fn, ...) \ ++ status = fn(buf, ##__VA_ARGS__); \ ++ aaudio_reply_free(&reply); \ ++ return status; ++ ++int aaudio_cmd_start_io(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_start_io, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_start_io_response); ++} ++int aaudio_cmd_stop_io(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_stop_io, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_stop_io_response); ++} ++int aaudio_cmd_get_property(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void **data, u64 *data_size) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_property, devid, obj, prop, qualifier, qualifier_size); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_property_response, &obj, &prop, data, data_size); ++} ++int aaudio_cmd_get_primitive_property(struct aaudio_device *a, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size) ++{ ++ int status; ++ struct aaudio_msg reply = aaudio_reply_alloc(); ++ void *r_data; ++ u64 r_data_size; ++ if ((status = aaudio_cmd_get_property(a, &reply, devid, obj, prop, qualifier, qualifier_size, ++ &r_data, &r_data_size))) ++ goto finish; ++ if (r_data_size != data_size) { ++ status = -EINVAL; ++ goto finish; ++ } ++ memcpy(data, r_data, data_size); ++finish: ++ aaudio_reply_free(&reply); ++ return status; ++} ++int aaudio_cmd_set_property(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_property, devid, obj, prop, data, data_size, ++ qualifier, qualifier_size); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_property_response, &obj); ++} ++int aaudio_cmd_property_listener(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_property_listener, devid, obj, prop); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_property_listener_response, &obj, &prop); ++} ++int aaudio_cmd_set_input_stream_address_ranges(struct aaudio_device *a, aaudio_device_id_t devid) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_input_stream_address_ranges, devid); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_input_stream_address_ranges_response); ++} ++int aaudio_cmd_get_input_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_input_stream_list, devid); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_input_stream_list_response, str_l, str_cnt); ++} ++int aaudio_cmd_get_output_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t 
**str_l, u64 *str_cnt) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_output_stream_list, devid); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_output_stream_list_response, str_l, str_cnt); ++} ++int aaudio_cmd_set_remote_access(struct aaudio_device *a, u64 mode) ++{ ++ CMD_DEF_SHARED_AND_SEND(aaudio_msg_write_set_remote_access, mode); ++ CMD_HNDL_REPLY_AND_FREE(aaudio_msg_read_set_remote_access_response); ++} ++int aaudio_cmd_get_device_list(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t **dev_l, u64 *dev_cnt) ++{ ++ CMD_DEF_SHARED_NO_REPLY_AND_SEND(aaudio_msg_write_get_device_list); ++ CMD_HNDL_REPLY_NO_FREE(aaudio_msg_read_get_device_list_response, dev_l, dev_cnt); ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/audio/protocol.h b/drivers/staging/apple-bce/audio/protocol.h +new file mode 100644 +index 000000000000..3427486f3f57 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol.h +@@ -0,0 +1,147 @@ ++#ifndef AAUDIO_PROTOCOL_H ++#define AAUDIO_PROTOCOL_H + ++#include + ++struct aaudio_device; + ++typedef u64 aaudio_device_id_t; ++typedef u64 aaudio_object_id_t; + ++struct aaudio_msg { ++ void *data; ++ size_t size; ++}; + ++struct __attribute__((packed)) aaudio_msg_header { ++ char tag[4]; ++ u8 type; ++ aaudio_device_id_t device_id; // Unknown; zero appears to be used for commands. 
++}; ++struct __attribute__((packed)) aaudio_msg_base { ++ u32 msg; ++ u32 status; ++}; + - return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt); - } - static DEVICE_ATTR_RW(ppt_apu_sppt); -@@ -1252,6 +1328,8 @@ static ssize_t ppt_platform_sppt_show(struct device *dev, - { - struct asus_wmi *asus = dev_get_drvdata(dev); - -+ asus_wmi_show_deprecated(); ++struct aaudio_prop_addr { ++ u32 scope; ++ u32 selector; ++ u32 element; ++}; ++#define AAUDIO_PROP(scope, sel, el) (struct aaudio_prop_addr) { scope, sel, el } + - return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt); - } - static DEVICE_ATTR_RW(ppt_platform_sppt); -@@ -1295,6 +1373,8 @@ static ssize_t nv_dynamic_boost_show(struct device *dev, - { - struct asus_wmi *asus = dev_get_drvdata(dev); - -+ asus_wmi_show_deprecated(); ++enum { ++ AAUDIO_MSG_TYPE_COMMAND = 1, ++ AAUDIO_MSG_TYPE_RESPONSE = 2, ++ AAUDIO_MSG_TYPE_NOTIFICATION = 3 ++}; + - return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost); - } - static DEVICE_ATTR_RW(nv_dynamic_boost); -@@ -1338,11 +1418,15 @@ static ssize_t nv_temp_target_show(struct device *dev, - { - struct asus_wmi *asus = dev_get_drvdata(dev); - -+ asus_wmi_show_deprecated(); ++enum { ++ AAUDIO_MSG_START_IO = 0, ++ AAUDIO_MSG_START_IO_RESPONSE = 1, ++ AAUDIO_MSG_STOP_IO = 2, ++ AAUDIO_MSG_STOP_IO_RESPONSE = 3, ++ AAUDIO_MSG_UPDATE_TIMESTAMP = 4, ++ AAUDIO_MSG_GET_PROPERTY = 7, ++ AAUDIO_MSG_GET_PROPERTY_RESPONSE = 8, ++ AAUDIO_MSG_SET_PROPERTY = 9, ++ AAUDIO_MSG_SET_PROPERTY_RESPONSE = 10, ++ AAUDIO_MSG_PROPERTY_LISTENER = 11, ++ AAUDIO_MSG_PROPERTY_LISTENER_RESPONSE = 12, ++ AAUDIO_MSG_PROPERTY_CHANGED = 13, ++ AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES = 18, ++ AAUDIO_MSG_SET_INPUT_STREAM_ADDRESS_RANGES_RESPONSE = 19, ++ AAUDIO_MSG_GET_INPUT_STREAM_LIST = 24, ++ AAUDIO_MSG_GET_INPUT_STREAM_LIST_RESPONSE = 25, ++ AAUDIO_MSG_GET_OUTPUT_STREAM_LIST = 26, ++ AAUDIO_MSG_GET_OUTPUT_STREAM_LIST_RESPONSE = 27, ++ AAUDIO_MSG_SET_REMOTE_ACCESS = 32, ++ AAUDIO_MSG_SET_REMOTE_ACCESS_RESPONSE = 33, ++ AAUDIO_MSG_UPDATE_TIMESTAMP_RESPONSE = 34, ++ ++ AAUDIO_MSG_NOTIFICATION_ALIVE = 100, ++ AAUDIO_MSG_GET_DEVICE_LIST = 101, ++ AAUDIO_MSG_GET_DEVICE_LIST_RESPONSE = 102, ++ AAUDIO_MSG_NOTIFICATION_BOOT = 104 ++}; + - return sysfs_emit(buf, "%u\n", asus->nv_temp_target); - } - static DEVICE_ATTR_RW(nv_temp_target); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Ally MCU Powersave ********************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t mcu_powersave_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -1353,6 +1437,8 @@ static ssize_t mcu_powersave_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++enum { ++ AAUDIO_REMOTE_ACCESS_OFF = 0, ++ AAUDIO_REMOTE_ACCESS_ON = 2 ++}; + - return sysfs_emit(buf, "%d\n", result); - } - -@@ -1388,6 +1474,7 @@ static ssize_t mcu_powersave_store(struct device *dev, - return count; - } - static DEVICE_ATTR_RW(mcu_powersave); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Battery ********************************************************************/ - -@@ -2261,6 +2348,7 @@ static int asus_wmi_rfkill_init(struct asus_wmi *asus) - } - - /* Panel Overdrive ************************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t panel_od_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -2271,6 +2359,8 @@ static ssize_t 
panel_od_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++enum { ++ AAUDIO_PROP_SCOPE_GLOBAL = 0x676c6f62, // 'glob' ++ AAUDIO_PROP_SCOPE_INPUT = 0x696e7074, // 'inpt' ++ AAUDIO_PROP_SCOPE_OUTPUT = 0x6f757470 // 'outp' ++}; + - return sysfs_emit(buf, "%d\n", result); - } - -@@ -2307,9 +2397,10 @@ static ssize_t panel_od_store(struct device *dev, - return count; - } - static DEVICE_ATTR_RW(panel_od); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Bootup sound ***************************************************************/ -- -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t boot_sound_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -2320,6 +2411,8 @@ static ssize_t boot_sound_show(struct device *dev, - if (result < 0) - return result; - -+ asus_wmi_show_deprecated(); ++enum { ++ AAUDIO_PROP_UID = 0x75696420, // 'uid ' ++ AAUDIO_PROP_BOOL_VALUE = 0x6263766c, // 'bcvl' ++ AAUDIO_PROP_JACK_PLUGGED = 0x6a61636b, // 'jack' ++ AAUDIO_PROP_SEL_VOLUME = 0x64656176, // 'deav' ++ AAUDIO_PROP_LATENCY = 0x6c746e63, // 'ltnc' ++ AAUDIO_PROP_PHYS_FORMAT = 0x70667420 // 'pft ' ++}; + - return sysfs_emit(buf, "%d\n", result); - } - -@@ -2355,8 +2448,10 @@ static ssize_t boot_sound_store(struct device *dev, - return count; - } - static DEVICE_ATTR_RW(boot_sound); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Mini-LED mode **************************************************************/ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t mini_led_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -2387,6 +2482,8 @@ static ssize_t mini_led_mode_show(struct device *dev, - } - } - -+ asus_wmi_show_deprecated(); ++int aaudio_msg_read_base(struct aaudio_msg *msg, struct aaudio_msg_base *base); ++ ++int aaudio_msg_read_start_io_response(struct aaudio_msg *msg); ++int aaudio_msg_read_stop_io_response(struct aaudio_msg *msg); ++int aaudio_msg_read_update_timestamp(struct aaudio_msg *msg, aaudio_device_id_t *devid, ++ u64 *timestamp, u64 *update_seed); ++int aaudio_msg_read_get_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop, void **data, u64 *data_size); ++int aaudio_msg_read_set_property_response(struct aaudio_msg *msg, aaudio_object_id_t *obj); ++int aaudio_msg_read_property_listener_response(struct aaudio_msg *msg,aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop); ++int aaudio_msg_read_property_changed(struct aaudio_msg *msg, aaudio_device_id_t *devid, aaudio_object_id_t *obj, ++ struct aaudio_prop_addr *prop); ++int aaudio_msg_read_set_input_stream_address_ranges_response(struct aaudio_msg *msg); ++int aaudio_msg_read_get_input_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_msg_read_get_output_stream_list_response(struct aaudio_msg *msg, aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_msg_read_set_remote_access_response(struct aaudio_msg *msg); ++int aaudio_msg_read_get_device_list_response(struct aaudio_msg *msg, aaudio_device_id_t **dev_l, u64 *dev_cnt); ++ ++void aaudio_msg_write_start_io(struct aaudio_msg *msg, aaudio_device_id_t dev); ++void aaudio_msg_write_stop_io(struct aaudio_msg *msg, aaudio_device_id_t dev); ++void aaudio_msg_write_get_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size); ++void 
aaudio_msg_write_set_property(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *data, u64 data_size, void *qualifier, u64 qualifier_size); ++void aaudio_msg_write_property_listener(struct aaudio_msg *msg, aaudio_device_id_t dev, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop); ++void aaudio_msg_write_set_input_stream_address_ranges(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_get_input_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_get_output_stream_list(struct aaudio_msg *msg, aaudio_device_id_t devid); ++void aaudio_msg_write_set_remote_access(struct aaudio_msg *msg, u64 mode); ++void aaudio_msg_write_alive_notification(struct aaudio_msg *msg, u32 proto_ver, u32 msg_ver); ++void aaudio_msg_write_update_timestamp_response(struct aaudio_msg *msg); ++void aaudio_msg_write_get_device_list(struct aaudio_msg *msg); ++ ++ ++int aaudio_cmd_start_io(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_stop_io(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_get_property(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void **data, u64 *data_size); ++int aaudio_cmd_get_primitive_property(struct aaudio_device *a, ++ aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size); ++int aaudio_cmd_set_property(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop, void *qualifier, u64 qualifier_size, void *data, u64 data_size); ++int aaudio_cmd_property_listener(struct aaudio_device *a, aaudio_device_id_t devid, aaudio_object_id_t obj, ++ struct aaudio_prop_addr prop); ++int aaudio_cmd_set_input_stream_address_ranges(struct aaudio_device *a, aaudio_device_id_t devid); ++int aaudio_cmd_get_input_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_cmd_get_output_stream_list(struct aaudio_device *a, struct aaudio_msg *buf, aaudio_device_id_t devid, ++ aaudio_object_id_t **str_l, u64 *str_cnt); ++int aaudio_cmd_set_remote_access(struct aaudio_device *a, u64 mode); ++int aaudio_cmd_get_device_list(struct aaudio_device *a, struct aaudio_msg *buf, ++ aaudio_device_id_t **dev_l, u64 *dev_cnt); ++ ++ ++ ++#endif //AAUDIO_PROTOCOL_H +diff --git a/drivers/staging/apple-bce/audio/protocol_bce.c b/drivers/staging/apple-bce/audio/protocol_bce.c +new file mode 100644 +index 000000000000..28f2dfd44d67 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol_bce.c +@@ -0,0 +1,226 @@ ++#include "protocol_bce.h" + - return sysfs_emit(buf, "%d\n", value); - } - -@@ -2457,10 +2554,13 @@ static ssize_t available_mini_led_mode_show(struct device *dev, - return sysfs_emit(buf, "0 1 2\n"); - } - -+ asus_wmi_show_deprecated(); ++#include "audio.h" + - return sysfs_emit(buf, "0\n"); - } - - static DEVICE_ATTR_RO(available_mini_led_mode); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Quirks *********************************************************************/ - -@@ -3748,6 +3848,7 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus) - return throttle_thermal_policy_write(asus); - } - -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - static ssize_t 
throttle_thermal_policy_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -3791,6 +3892,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev, - * Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent - */ - static DEVICE_ATTR_RW(throttle_thermal_policy); -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - /* Platform profile ***********************************************************/ - static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof, -@@ -4388,27 +4490,29 @@ static struct attribute *platform_attributes[] = { - &dev_attr_camera.attr, - &dev_attr_cardr.attr, - &dev_attr_touchpad.attr, -- &dev_attr_charge_mode.attr, -- &dev_attr_egpu_enable.attr, -- &dev_attr_egpu_connected.attr, -- &dev_attr_dgpu_disable.attr, -- &dev_attr_gpu_mux_mode.attr, - &dev_attr_lid_resume.attr, - &dev_attr_als_enable.attr, - &dev_attr_fan_boost_mode.attr, -- &dev_attr_throttle_thermal_policy.attr, -- &dev_attr_ppt_pl2_sppt.attr, -- &dev_attr_ppt_pl1_spl.attr, -- &dev_attr_ppt_fppt.attr, -- &dev_attr_ppt_apu_sppt.attr, -- &dev_attr_ppt_platform_sppt.attr, -- &dev_attr_nv_dynamic_boost.attr, -- &dev_attr_nv_temp_target.attr, -- &dev_attr_mcu_powersave.attr, -- &dev_attr_boot_sound.attr, -- &dev_attr_panel_od.attr, -- &dev_attr_mini_led_mode.attr, -- &dev_attr_available_mini_led_mode.attr, -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) -+ &dev_attr_charge_mode.attr, -+ &dev_attr_egpu_enable.attr, -+ &dev_attr_egpu_connected.attr, -+ &dev_attr_dgpu_disable.attr, -+ &dev_attr_gpu_mux_mode.attr, -+ &dev_attr_ppt_pl2_sppt.attr, -+ &dev_attr_ppt_pl1_spl.attr, -+ &dev_attr_ppt_fppt.attr, -+ &dev_attr_ppt_apu_sppt.attr, -+ &dev_attr_ppt_platform_sppt.attr, -+ &dev_attr_nv_dynamic_boost.attr, -+ &dev_attr_nv_temp_target.attr, -+ &dev_attr_mcu_powersave.attr, -+ &dev_attr_boot_sound.attr, -+ &dev_attr_panel_od.attr, -+ &dev_attr_mini_led_mode.attr, -+ &dev_attr_available_mini_led_mode.attr, -+ &dev_attr_throttle_thermal_policy.attr, -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - NULL - }; - -@@ -4430,7 +4534,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, - devid = ASUS_WMI_DEVID_LID_RESUME; - else if (attr == &dev_attr_als_enable.attr) - devid = ASUS_WMI_DEVID_ALS_ENABLE; -- else if (attr == &dev_attr_charge_mode.attr) -+ else if (attr == &dev_attr_fan_boost_mode.attr) -+ ok = asus->fan_boost_mode_available; ++static void aaudio_bce_out_queue_completion(struct bce_queue_sq *sq); ++static void aaudio_bce_in_queue_completion(struct bce_queue_sq *sq); ++static int aaudio_bce_queue_init(struct aaudio_device *dev, struct aaudio_bce_queue *q, const char *name, int direction, ++ bce_sq_completion cfn); ++void aaudio_bce_in_queue_submit_pending(struct aaudio_bce_queue *q, size_t count); + -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) -+ if (attr == &dev_attr_charge_mode.attr) - devid = ASUS_WMI_DEVID_CHARGE_MODE; - else if (attr == &dev_attr_egpu_enable.attr) - ok = asus->egpu_enable_available; -@@ -4468,6 +4576,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, - ok = asus->mini_led_dev_id != 0; - else if (attr == &dev_attr_available_mini_led_mode.attr) - ok = asus->mini_led_dev_id != 0; -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - if (devid != -1) { - ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); -@@ -4707,7 +4816,19 @@ static int asus_wmi_add(struct platform_device *pdev) - if (err) - goto fail_platform; - -+ ally_mcu_usb_plug = acpi_has_method(NULL, 
ASUS_USB0_PWR_EC0_CSEE) -+ && dmi_check_system(asus_rog_ally_device); -+ if (ally_mcu_usb_plug) { -+ /* -+ * These steps ensure the device is in a valid good state, this is -+ * especially important for the Ally 1 after a reboot. -+ */ -+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_ON); -+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); -+ } ++int aaudio_bce_init(struct aaudio_device *dev) ++{ ++ int status; ++ struct aaudio_bce *bce = &dev->bcem; ++ bce->cq = bce_create_cq(dev->bce, 0x80); ++ spin_lock_init(&bce->spinlock); ++ if (!bce->cq) ++ return -EINVAL; ++ if ((status = aaudio_bce_queue_init(dev, &bce->qout, "com.apple.BridgeAudio.IntelToARM", DMA_TO_DEVICE, ++ aaudio_bce_out_queue_completion))) { ++ return status; ++ } ++ if ((status = aaudio_bce_queue_init(dev, &bce->qin, "com.apple.BridgeAudio.ARMToIntel", DMA_FROM_DEVICE, ++ aaudio_bce_in_queue_completion))) { ++ return status; ++ } ++ aaudio_bce_in_queue_submit_pending(&bce->qin, bce->qin.el_count); ++ return 0; ++} + - /* ensure defaults for tunables */ -+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) - asus->ppt_pl2_sppt = 5; - asus->ppt_pl1_spl = 5; - asus->ppt_apu_sppt = 5; -@@ -4719,8 +4840,6 @@ static int asus_wmi_add(struct platform_device *pdev) - asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); - asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); - asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); -- asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) -- && dmi_check_system(asus_ally_mcu_quirk); - - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE)) - asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE; -@@ -4731,23 +4850,24 @@ static int asus_wmi_add(struct platform_device *pdev) - asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX; - else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) - asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO; -- -- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) -- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; -- else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) -- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; -+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ - - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY)) - asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY; - else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO)) - asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; - -+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) -+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; -+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) -+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; ++int aaudio_bce_queue_init(struct aaudio_device *dev, struct aaudio_bce_queue *q, const char *name, int direction, ++ bce_sq_completion cfn) ++{ ++ q->cq = dev->bcem.cq; ++ q->el_size = AAUDIO_BCE_QUEUE_ELEMENT_SIZE; ++ q->el_count = AAUDIO_BCE_QUEUE_ELEMENT_COUNT; ++ /* NOTE: The Apple impl uses 0x80 as the queue size, however we use 21 (in fact 20) to simplify the impl */ ++ q->sq = bce_create_sq(dev->bce, q->cq, name, (u32) (q->el_count + 1), direction, cfn, dev); ++ if (!q->sq) ++ return -EINVAL; + - err = fan_boost_mode_check_present(asus); - if (err) - goto fail_fan_boost_mode; - - err = platform_profile_setup(asus); -- if (err) -+ if 
(err && err != -EEXIST) - goto fail_platform_profile_setup; - - err = asus_wmi_sysfs_init(asus->platform_device); -@@ -4911,34 +5031,6 @@ static int asus_hotk_resume(struct device *device) - return 0; - } - --static int asus_hotk_resume_early(struct device *device) --{ -- struct asus_wmi *asus = dev_get_drvdata(device); -- -- if (asus->ally_mcu_usb_switch) { -- /* sleep required to prevent USB0 being yanked then reappearing rapidly */ -- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB8))) -- dev_err(device, "ROG Ally MCU failed to connect USB dev\n"); -- else -- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); -- } -- return 0; --} -- --static int asus_hotk_prepare(struct device *device) --{ -- struct asus_wmi *asus = dev_get_drvdata(device); -- -- if (asus->ally_mcu_usb_switch) { -- /* sleep required to ensure USB0 is disabled before sleep continues */ -- if (ACPI_FAILURE(acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, 0xB7))) -- dev_err(device, "ROG Ally MCU failed to disconnect USB dev\n"); -- else -- msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); -- } -- return 0; --} -- - static int asus_hotk_restore(struct device *device) - { - struct asus_wmi *asus = dev_get_drvdata(device); -@@ -4979,11 +5071,32 @@ static int asus_hotk_restore(struct device *device) - return 0; - } - -+static void asus_ally_s2idle_restore(void) ++ q->data = dma_alloc_coherent(&dev->bce->pci->dev, q->el_size * q->el_count, &q->dma_addr, GFP_KERNEL); ++ if (!q->data) { ++ bce_destroy_sq(dev->bce, q->sq); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static void aaudio_send_create_tag(struct aaudio_bce *b, int *tagn, char tag[4]) +{ -+ if (ally_mcu_usb_plug) { -+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_ON); -+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); -+ } ++ char tag_zero[5]; ++ b->tag_num = (b->tag_num + 1) % AAUDIO_BCE_QUEUE_TAG_COUNT; ++ *tagn = b->tag_num; ++ snprintf(tag_zero, 5, "S%03d", b->tag_num); ++ *((u32 *) tag) = *((u32 *) tag_zero); +} + -+static int asus_hotk_prepare(struct device *device) ++int __aaudio_send_prepare(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, char *tag) +{ -+ if (ally_mcu_usb_plug) { -+ acpi_execute_simple_method(NULL, ASUS_USB0_PWR_EC0_CSEE, ASUS_USB0_PWR_EC0_CSEE_OFF); -+ msleep(ASUS_USB0_PWR_EC0_CSEE_WAIT); -+ } -+ return 0; ++ int status; ++ size_t index; ++ void *dptr; ++ struct aaudio_msg_header *header; ++ if ((status = bce_reserve_submission(b->qout.sq, &ctx->timeout))) ++ return status; ++ spin_lock_irqsave(&b->spinlock, ctx->irq_flags); ++ index = b->qout.data_tail; ++ dptr = (u8 *) b->qout.data + index * b->qout.el_size; ++ ctx->msg.data = dptr; ++ header = dptr; ++ if (tag) ++ *((u32 *) header->tag) = *((u32 *) tag); ++ else ++ aaudio_send_create_tag(b, &ctx->tag_n, header->tag); ++ return 0; +} + -+/* Use only for Ally devices due to the wake_on_ac */ -+static struct acpi_s2idle_dev_ops asus_ally_s2idle_dev_ops = { -+ .restore = asus_ally_s2idle_restore, -+}; ++void __aaudio_send(struct aaudio_bce *b, struct aaudio_send_ctx *ctx) ++{ ++ struct bce_qe_submission *s = bce_next_submission(b->qout.sq); ++#ifdef DEBUG ++ pr_debug("aaudio: Sending command data\n"); ++ print_hex_dump(KERN_DEBUG, "aaudio:OUT ", DUMP_PREFIX_NONE, 32, 1, ctx->msg.data, ctx->msg.size, true); ++#endif ++ bce_set_submission_single(s, b->qout.dma_addr + (dma_addr_t) (ctx->msg.data - b->qout.data), ctx->msg.size); ++ bce_submit_to_device(b->qout.sq); ++ b->qout.data_tail = (b->qout.data_tail + 1) % b->qout.el_count; ++ 
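++ /* Pairs with the spin_lock_irqsave() taken in __aaudio_send_prepare();
++  * the lock is held across the submission so the tail index and the
++  * reserved message slot stay consistent until the doorbell is rung. */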
spin_unlock_irqrestore(&b->spinlock, ctx->irq_flags); ++} ++ ++int __aaudio_send_cmd_sync(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, struct aaudio_msg *reply) ++{ ++ struct aaudio_bce_queue_entry ent; ++ DECLARE_COMPLETION_ONSTACK(cmpl); ++ ent.msg = reply; ++ ent.cmpl = &cmpl; ++ b->pending_entries[ctx->tag_n] = &ent; ++ __aaudio_send(b, ctx); /* unlocks the spinlock */ ++ ctx->timeout = wait_for_completion_timeout(&cmpl, ctx->timeout); ++ if (ctx->timeout == 0) { ++ /* Remove the pending queue entry; this will be normally handled by the completion route but ++ * during a timeout it won't */ ++ spin_lock_irqsave(&b->spinlock, ctx->irq_flags); ++ if (b->pending_entries[ctx->tag_n] == &ent) ++ b->pending_entries[ctx->tag_n] = NULL; ++ spin_unlock_irqrestore(&b->spinlock, ctx->irq_flags); ++ return -ETIMEDOUT; ++ } ++ return 0; ++} + - static const struct dev_pm_ops asus_pm_ops = { - .thaw = asus_hotk_thaw, - .restore = asus_hotk_restore, - .resume = asus_hotk_resume, -- .resume_early = asus_hotk_resume_early, - .prepare = asus_hotk_prepare, - }; - -@@ -5011,6 +5124,10 @@ static int asus_wmi_probe(struct platform_device *pdev) - return ret; - } - -+ ret = acpi_register_lps0_dev(&asus_ally_s2idle_dev_ops); -+ if (ret) -+ pr_warn("failed to register LPS0 sleep handler in asus-wmi\n"); ++static void aaudio_handle_reply(struct aaudio_bce *b, struct aaudio_msg *reply) ++{ ++ const char *tag; ++ int tagn; ++ unsigned long irq_flags; ++ char tag_zero[5]; ++ struct aaudio_bce_queue_entry *entry; + - return asus_wmi_add(pdev); - } - -@@ -5043,6 +5160,7 @@ EXPORT_SYMBOL_GPL(asus_wmi_register_driver); - - void asus_wmi_unregister_driver(struct asus_wmi_driver *driver) - { -+ acpi_unregister_lps0_dev(&asus_ally_s2idle_dev_ops); - platform_device_unregister(driver->platform_device); - platform_driver_unregister(&driver->platform_driver); - used = false; -diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig -index 37c24ffea65c..bd52d1e081b7 100644 ---- a/drivers/scsi/Kconfig -+++ b/drivers/scsi/Kconfig -@@ -1522,4 +1522,6 @@ endif # SCSI_LOWLEVEL - - source "drivers/scsi/device_handler/Kconfig" - -+source "drivers/scsi/vhba/Kconfig" ++ tag = ((struct aaudio_msg_header *) reply->data)->tag; ++ if (tag[0] != 'S') { ++ pr_err("aaudio_handle_reply: Unexpected tag: %.4s\n", tag); ++ return; ++ } ++ *((u32 *) tag_zero) = *((u32 *) tag); ++ tag_zero[4] = 0; ++ if (kstrtoint(&tag_zero[1], 10, &tagn)) { ++ pr_err("aaudio_handle_reply: Tag parse failed: %.4s\n", tag); ++ return; ++ } + - endmenu -diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile -index 1313ddf2fd1a..5942e8f79159 100644 ---- a/drivers/scsi/Makefile -+++ b/drivers/scsi/Makefile -@@ -153,6 +153,7 @@ obj-$(CONFIG_CHR_DEV_SCH) += ch.o - obj-$(CONFIG_SCSI_ENCLOSURE) += ses.o - - obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ -+obj-$(CONFIG_VHBA) += vhba/ - - # This goes last, so that "real" scsi devices probe earlier - obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o -diff --git a/drivers/scsi/vhba/Kconfig b/drivers/scsi/vhba/Kconfig -new file mode 100644 -index 000000000000..e70a381fe3df ---- /dev/null -+++ b/drivers/scsi/vhba/Kconfig -@@ -0,0 +1,9 @@ -+config VHBA -+ tristate "Virtual (SCSI) Host Bus Adapter" -+ depends on SCSI -+ help -+ This is the in-kernel part of CDEmu, a CD/DVD-ROM device -+ emulator. 
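The synchronous request/reply machinery above is normally driven through the aaudio_send_cmd_sync() macro defined later in this patch (protocol_bce.h). A minimal caller sketch follows; fill_example_msg(), send_example_cmd(), the 500 ms timeout, and the assumption that the callback writes the message type into the aaudio_msg_header (as aaudio_bce_in_queue_handle_msg() reads it) are illustrative, not part of the patch:

/* Sketch only; not part of the patch. */
static void fill_example_msg(struct aaudio_msg *msg)
{
    struct aaudio_msg_header *header = msg->data;

    /* The tag was already written by __aaudio_send_prepare(); the
     * callback only fills the rest of the header and the payload. */
    header->type = AAUDIO_MSG_TYPE_COMMAND;
    msg->size = sizeof(*header);
}

static int send_example_cmd(struct aaudio_device *a)
{
    struct aaudio_send_ctx ctx;
    struct aaudio_msg reply = aaudio_reply_alloc();
    int status;

    if (!reply.data)
        return -ENOMEM;
    /* Reserves an output slot, tags the message, rings the doorbell and
     * sleeps until aaudio_handle_reply() completes the pending entry
     * (or returns -ETIMEDOUT after 500 ms). */
    status = aaudio_send_cmd_sync(a, &ctx, &reply, 500, fill_example_msg);
    aaudio_reply_free(&reply);
    return status;
}

Replies are matched purely by the "S%03d" tag written in aaudio_send_create_tag(), so at most AAUDIO_BCE_QUEUE_TAG_COUNT (1000) outstanding requests can be told apart at a time.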
++ spin_lock_irqsave(&b->spinlock, irq_flags); ++ entry = b->pending_entries[tagn]; ++ if (entry) { ++ if (reply->size < entry->msg->size) ++ entry->msg->size = reply->size; ++ memcpy(entry->msg->data, reply->data, entry->msg->size); ++ complete(entry->cmpl); + -+ This driver can also be built as a module. If so, the module -+ will be called vhba. -diff --git a/drivers/scsi/vhba/Makefile b/drivers/scsi/vhba/Makefile -new file mode 100644 -index 000000000000..2d7524b66199 ---- /dev/null -+++ b/drivers/scsi/vhba/Makefile -@@ -0,0 +1,4 @@ -+VHBA_VERSION := 20240917 ++ b->pending_entries[tagn] = NULL; ++ } else { ++ pr_err("aaudio_handle_reply: No queued item found for tag: %.4s\n", tag); ++ } ++ spin_unlock_irqrestore(&b->spinlock, irq_flags); ++} + -+obj-$(CONFIG_VHBA) += vhba.o -+ccflags-y := -DVHBA_VERSION=\"$(VHBA_VERSION)\" -Werror -diff --git a/drivers/scsi/vhba/vhba.c b/drivers/scsi/vhba/vhba.c -new file mode 100644 -index 000000000000..7531223355e5 ---- /dev/null -+++ b/drivers/scsi/vhba/vhba.c -@@ -0,0 +1,1130 @@ -+/* -+ * vhba.c -+ * -+ * Copyright (C) 2007-2012 Chia-I Wu -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
-+ */ ++static void aaudio_bce_out_queue_completion(struct bce_queue_sq *sq) ++{ ++ while (bce_next_completion(sq)) { ++ //pr_info("aaudio: Send confirmed\n"); ++ bce_notify_submission_complete(sq); ++ } ++} + -+#define pr_fmt(fmt) "vhba: " fmt ++static void aaudio_bce_in_queue_handle_msg(struct aaudio_device *a, struct aaudio_msg *msg); + -+#include ++static void aaudio_bce_in_queue_completion(struct bce_queue_sq *sq) ++{ ++ struct aaudio_msg msg; ++ struct aaudio_device *dev = sq->userdata; ++ struct aaudio_bce_queue *q = &dev->bcem.qin; ++ struct bce_sq_completion_data *c; ++ size_t cnt = 0; + -+#include -+#include -+#include -+#include -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) -+#include -+#else -+#include -+#endif -+#include -+#include -+#include -+#include -+#include -+#ifdef CONFIG_COMPAT -+#include ++ mb(); ++ while ((c = bce_next_completion(sq))) { ++ msg.data = (u8 *) q->data + q->data_head * q->el_size; ++ msg.size = c->data_size; ++#ifdef DEBUG ++ pr_debug("aaudio: Received command data %llx\n", c->data_size); ++ print_hex_dump(KERN_DEBUG, "aaudio:IN ", DUMP_PREFIX_NONE, 32, 1, msg.data, min(msg.size, 128UL), true); +#endif -+#include -+#include -+#include -+#include -+#include -+#include ++ aaudio_bce_in_queue_handle_msg(dev, &msg); + ++ q->data_head = (q->data_head + 1) % q->el_count; + -+MODULE_AUTHOR("Chia-I Wu"); -+MODULE_VERSION(VHBA_VERSION); -+MODULE_DESCRIPTION("Virtual SCSI HBA"); -+MODULE_LICENSE("GPL"); ++ bce_notify_submission_complete(sq); ++ ++cnt; ++ } ++ aaudio_bce_in_queue_submit_pending(q, cnt); ++} + ++static void aaudio_bce_in_queue_handle_msg(struct aaudio_device *a, struct aaudio_msg *msg) ++{ ++ struct aaudio_msg_header *header = (struct aaudio_msg_header *) msg->data; ++ if (msg->size < sizeof(struct aaudio_msg_header)) { ++ pr_err("aaudio: Msg size smaller than header (%lx)", msg->size); ++ return; ++ } ++ if (header->type == AAUDIO_MSG_TYPE_RESPONSE) { ++ aaudio_handle_reply(&a->bcem, msg); ++ } else if (header->type == AAUDIO_MSG_TYPE_COMMAND) { ++ aaudio_handle_command(a, msg); ++ } else if (header->type == AAUDIO_MSG_TYPE_NOTIFICATION) { ++ aaudio_handle_notification(a, msg); ++ } ++} + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0) -+#define sdev_dbg(sdev, fmt, a...) \ -+ dev_dbg(&(sdev)->sdev_gendev, fmt, ##a) -+#define scmd_dbg(scmd, fmt, a...) 
\ -+ dev_dbg(&(scmd)->device->sdev_gendev, fmt, ##a) -+#endif ++void aaudio_bce_in_queue_submit_pending(struct aaudio_bce_queue *q, size_t count) ++{ ++ struct bce_qe_submission *s; ++ while (count--) { ++ if (bce_reserve_submission(q->sq, NULL)) { ++ pr_err("aaudio: Failed to reserve an event queue submission\n"); ++ break; ++ } ++ s = bce_next_submission(q->sq); ++ bce_set_submission_single(s, q->dma_addr + (dma_addr_t) (q->data_tail * q->el_size), q->el_size); ++ q->data_tail = (q->data_tail + 1) % q->el_count; ++ } ++ bce_submit_to_device(q->sq); ++} + -+#define VHBA_MAX_SECTORS_PER_IO 256 -+#define VHBA_MAX_BUS 16 -+#define VHBA_MAX_ID 16 -+#define VHBA_MAX_DEVICES (VHBA_MAX_BUS * (VHBA_MAX_ID-1)) -+#define VHBA_KBUF_SIZE PAGE_SIZE ++struct aaudio_msg aaudio_reply_alloc(void) ++{ ++ struct aaudio_msg ret; ++ ret.size = AAUDIO_BCE_QUEUE_ELEMENT_SIZE; ++ ret.data = kmalloc(ret.size, GFP_KERNEL); ++ return ret; ++} + -+#define DATA_TO_DEVICE(dir) ((dir) == DMA_TO_DEVICE || (dir) == DMA_BIDIRECTIONAL) -+#define DATA_FROM_DEVICE(dir) ((dir) == DMA_FROM_DEVICE || (dir) == DMA_BIDIRECTIONAL) ++void aaudio_reply_free(struct aaudio_msg *reply) ++{ ++ kfree(reply->data); ++} +diff --git a/drivers/staging/apple-bce/audio/protocol_bce.h b/drivers/staging/apple-bce/audio/protocol_bce.h +new file mode 100644 +index 000000000000..14d26c05ddf9 +--- /dev/null ++++ b/drivers/staging/apple-bce/audio/protocol_bce.h +@@ -0,0 +1,72 @@ ++#ifndef AAUDIO_PROTOCOL_BCE_H ++#define AAUDIO_PROTOCOL_BCE_H + ++#include "protocol.h" ++#include "../queue.h" + -+static int vhba_can_queue = 32; -+module_param_named(can_queue, vhba_can_queue, int, 0); ++#define AAUDIO_BCE_QUEUE_ELEMENT_SIZE 0x1000 ++#define AAUDIO_BCE_QUEUE_ELEMENT_COUNT 20 + ++#define AAUDIO_BCE_QUEUE_TAG_COUNT 1000 + -+enum vhba_req_state { -+ VHBA_REQ_FREE, -+ VHBA_REQ_PENDING, -+ VHBA_REQ_READING, -+ VHBA_REQ_SENT, -+ VHBA_REQ_WRITING, ++struct aaudio_device; ++ ++struct aaudio_bce_queue_entry { ++ struct aaudio_msg *msg; ++ struct completion *cmpl; ++}; ++struct aaudio_bce_queue { ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq; ++ void *data; ++ dma_addr_t dma_addr; ++ size_t data_head, data_tail; ++ size_t el_size, el_count; ++}; ++struct aaudio_bce { ++ struct bce_queue_cq *cq; ++ struct aaudio_bce_queue qin; ++ struct aaudio_bce_queue qout; ++ int tag_num; ++ struct aaudio_bce_queue_entry *pending_entries[AAUDIO_BCE_QUEUE_TAG_COUNT]; ++ struct spinlock spinlock; +}; + -+struct vhba_command { -+ struct scsi_cmnd *cmd; -+ /* metatags are per-host. not to be confused with -+ queue tags that are usually per-lun */ -+ unsigned long metatag; ++struct aaudio_send_ctx { + int status; -+ struct list_head entry; ++ int tag_n; ++ unsigned long irq_flags; ++ struct aaudio_msg msg; ++ unsigned long timeout; +}; + -+struct vhba_device { -+ unsigned int num; -+ spinlock_t cmd_lock; -+ struct list_head cmd_list; -+ wait_queue_head_t cmd_wq; -+ atomic_t refcnt; ++int aaudio_bce_init(struct aaudio_device *dev); ++int __aaudio_send_prepare(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, char *tag); ++void __aaudio_send(struct aaudio_bce *b, struct aaudio_send_ctx *ctx); ++int __aaudio_send_cmd_sync(struct aaudio_bce *b, struct aaudio_send_ctx *ctx, struct aaudio_msg *reply); ++ ++#define aaudio_send_with_tag(a, ctx, tag, tout, fn, ...) 
({ \ ++ (ctx)->timeout = msecs_to_jiffies(tout); \ ++ (ctx)->status = __aaudio_send_prepare(&(a)->bcem, (ctx), (tag)); \ ++ if (!(ctx)->status) { \ ++ fn(&(ctx)->msg, ##__VA_ARGS__); \ ++ __aaudio_send(&(a)->bcem, (ctx)); \ ++ } \ ++ (ctx)->status; \ ++}) ++#define aaudio_send(a, ctx, tout, fn, ...) aaudio_send_with_tag(a, ctx, NULL, tout, fn, ##__VA_ARGS__) ++ ++#define aaudio_send_cmd_sync(a, ctx, reply, tout, fn, ...) ({ \ ++ (ctx)->timeout = msecs_to_jiffies(tout); \ ++ (ctx)->status = __aaudio_send_prepare(&(a)->bcem, (ctx), NULL); \ ++ if (!(ctx)->status) { \ ++ fn(&(ctx)->msg, ##__VA_ARGS__); \ ++ (ctx)->status = __aaudio_send_cmd_sync(&(a)->bcem, (ctx), (reply)); \ ++ } \ ++ (ctx)->status; \ ++}) ++ ++struct aaudio_msg aaudio_reply_alloc(void); ++void aaudio_reply_free(struct aaudio_msg *reply); ++ ++#endif //AAUDIO_PROTOCOL_BCE_H +diff --git a/drivers/staging/apple-bce/mailbox.c b/drivers/staging/apple-bce/mailbox.c +new file mode 100644 +index 000000000000..e24bd35215c0 +--- /dev/null ++++ b/drivers/staging/apple-bce/mailbox.c +@@ -0,0 +1,151 @@ ++#include "mailbox.h" ++#include ++#include "apple_bce.h" + -+ unsigned char *kbuf; -+ size_t kbuf_size; -+}; ++#define REG_MBOX_OUT_BASE 0x820 ++#define REG_MBOX_REPLY_COUNTER 0x108 ++#define REG_MBOX_REPLY_BASE 0x810 ++#define REG_TIMESTAMP_BASE 0xC000 + -+struct vhba_host { -+ struct Scsi_Host *shost; -+ spinlock_t cmd_lock; -+ int cmd_next; -+ struct vhba_command *commands; -+ spinlock_t dev_lock; -+ struct vhba_device *devices[VHBA_MAX_DEVICES]; -+ int num_devices; -+ DECLARE_BITMAP(chgmap, VHBA_MAX_DEVICES); -+ int chgtype[VHBA_MAX_DEVICES]; -+ struct work_struct scan_devices; -+}; ++#define BCE_MBOX_TIMEOUT_MS 200 + -+#define MAX_COMMAND_SIZE 16 ++void bce_mailbox_init(struct bce_mailbox *mb, void __iomem *reg_mb) ++{ ++ mb->reg_mb = reg_mb; ++ init_completion(&mb->mb_completion); ++} + -+struct vhba_request { -+ __u32 metatag; -+ __u32 lun; -+ __u8 cdb[MAX_COMMAND_SIZE]; -+ __u8 cdb_len; -+ __u32 data_len; ++int bce_mailbox_send(struct bce_mailbox *mb, u64 msg, u64* recv) ++{ ++ u32 __iomem *regb; ++ ++ if (atomic_cmpxchg(&mb->mb_status, 0, 1) != 0) { ++ return -EEXIST; // We don't support two messages at once ++ } ++ reinit_completion(&mb->mb_completion); ++ ++ pr_debug("bce_mailbox_send: %llx\n", msg); ++ regb = (u32*) ((u8*) mb->reg_mb + REG_MBOX_OUT_BASE); ++ iowrite32((u32) msg, regb); ++ iowrite32((u32) (msg >> 32), regb + 1); ++ iowrite32(0, regb + 2); ++ iowrite32(0, regb + 3); ++ ++ wait_for_completion_timeout(&mb->mb_completion, msecs_to_jiffies(BCE_MBOX_TIMEOUT_MS)); ++ if (atomic_read(&mb->mb_status) != 2) { // Didn't get the reply ++ atomic_set(&mb->mb_status, 0); ++ return -ETIMEDOUT; ++ } ++ ++ *recv = mb->mb_result; ++ pr_debug("bce_mailbox_send: reply %llx\n", *recv); ++ ++ atomic_set(&mb->mb_status, 0); ++ return 0; ++} ++ ++static int bce_mailbox_retrive_response(struct bce_mailbox *mb) ++{ ++ u32 __iomem *regb; ++ u32 lo, hi; ++ int count, counter; ++ u32 res = ioread32((u8*) mb->reg_mb + REG_MBOX_REPLY_COUNTER); ++ count = (res >> 20) & 0xf; ++ counter = count; ++ pr_debug("bce_mailbox_retrive_response count=%i\n", count); ++ while (counter--) { ++ regb = (u32*) ((u8*) mb->reg_mb + REG_MBOX_REPLY_BASE); ++ lo = ioread32(regb); ++ hi = ioread32(regb + 1); ++ ioread32(regb + 2); ++ ioread32(regb + 3); ++ pr_debug("bce_mailbox_retrive_response %llx\n", ((u64) hi << 32) | lo); ++ mb->mb_result = ((u64) hi << 32) | lo; ++ } ++ return count > 0 ? 
0 : -ENODATA; ++} ++ ++int bce_mailbox_handle_interrupt(struct bce_mailbox *mb) ++{ ++ int status = bce_mailbox_retrive_response(mb); ++ if (!status) { ++ atomic_set(&mb->mb_status, 2); ++ complete(&mb->mb_completion); ++ } ++ return status; ++} ++ ++static void bc_send_timestamp(struct timer_list *tl); ++ ++void bce_timestamp_init(struct bce_timestamp *ts, void __iomem *reg) ++{ ++ u32 __iomem *regb; ++ ++ spin_lock_init(&ts->stop_sl); ++ ts->stopped = false; ++ ++ ts->reg = reg; ++ ++ regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ ioread32(regb); ++ mb(); ++ ++ timer_setup(&ts->timer, bc_send_timestamp, 0); ++} ++ ++void bce_timestamp_start(struct bce_timestamp *ts, bool is_initial) ++{ ++ unsigned long flags; ++ u32 __iomem *regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ if (is_initial) { ++ iowrite32((u32) -4, regb + 2); ++ iowrite32((u32) -1, regb); ++ } else { ++ iowrite32((u32) -3, regb + 2); ++ iowrite32((u32) -1, regb); ++ } ++ ++ spin_lock_irqsave(&ts->stop_sl, flags); ++ ts->stopped = false; ++ spin_unlock_irqrestore(&ts->stop_sl, flags); ++ mod_timer(&ts->timer, jiffies + msecs_to_jiffies(150)); ++} ++ ++void bce_timestamp_stop(struct bce_timestamp *ts) ++{ ++ unsigned long flags; ++ u32 __iomem *regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ ++ spin_lock_irqsave(&ts->stop_sl, flags); ++ ts->stopped = true; ++ spin_unlock_irqrestore(&ts->stop_sl, flags); ++ del_timer_sync(&ts->timer); ++ ++ iowrite32((u32) -2, regb + 2); ++ iowrite32((u32) -1, regb); ++} ++ ++static void bc_send_timestamp(struct timer_list *tl) ++{ ++ struct bce_timestamp *ts; ++ unsigned long flags; ++ u32 __iomem *regb; ++ ktime_t bt; ++ ++ ts = container_of(tl, struct bce_timestamp, timer); ++ regb = (u32*) ((u8*) ts->reg + REG_TIMESTAMP_BASE); ++ local_irq_save(flags); ++ ioread32(regb + 2); ++ mb(); ++ bt = ktime_get_boottime(); ++ iowrite32((u32) bt, regb + 2); ++ iowrite32((u32) (bt >> 32), regb); ++ ++ spin_lock(&ts->stop_sl); ++ if (!ts->stopped) ++ mod_timer(&ts->timer, jiffies + msecs_to_jiffies(150)); ++ spin_unlock(&ts->stop_sl); ++ local_irq_restore(flags); ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/mailbox.h b/drivers/staging/apple-bce/mailbox.h +new file mode 100644 +index 000000000000..f3323f95ba51 +--- /dev/null ++++ b/drivers/staging/apple-bce/mailbox.h +@@ -0,0 +1,53 @@ ++#ifndef BCE_MAILBOX_H ++#define BCE_MAILBOX_H ++ ++#include ++#include ++#include ++ ++struct bce_mailbox { ++ void __iomem *reg_mb; ++ ++ atomic_t mb_status; // possible statuses: 0 (no msg), 1 (has active msg), 2 (got reply) ++ struct completion mb_completion; ++ uint64_t mb_result; +}; + -+struct vhba_response { -+ __u32 metatag; -+ __u32 status; -+ __u32 data_len; ++enum bce_message_type { ++ BCE_MB_REGISTER_COMMAND_SQ = 0x7, // to-device ++ BCE_MB_REGISTER_COMMAND_CQ = 0x8, // to-device ++ BCE_MB_REGISTER_COMMAND_QUEUE_REPLY = 0xB, // to-host ++ BCE_MB_SET_FW_PROTOCOL_VERSION = 0xC, // both ++ BCE_MB_SLEEP_NO_STATE = 0x14, // to-device ++ BCE_MB_RESTORE_NO_STATE = 0x15, // to-device ++ BCE_MB_SAVE_STATE_AND_SLEEP = 0x17, // to-device ++ BCE_MB_RESTORE_STATE_AND_WAKE = 0x18, // to-device ++ BCE_MB_SAVE_STATE_AND_SLEEP_FAILURE = 0x19, // from-device ++ BCE_MB_SAVE_RESTORE_STATE_COMPLETE = 0x1A, // from-device +}; + ++#define BCE_MB_MSG(type, value) (((u64) (type) << 58) | ((value) & 0x3FFFFFFFFFFFFFFLL)) ++#define BCE_MB_TYPE(v) ((u32) (v >> 58)) ++#define BCE_MB_VALUE(v) (v & 0x3FFFFFFFFFFFFFFLL) + ++void bce_mailbox_init(struct bce_mailbox *mb, void __iomem 
*reg_mb); + -+static struct vhba_command *vhba_alloc_command (void); -+static void vhba_free_command (struct vhba_command *vcmd); ++int bce_mailbox_send(struct bce_mailbox *mb, u64 msg, u64* recv); + -+static struct platform_device vhba_platform_device; ++int bce_mailbox_handle_interrupt(struct bce_mailbox *mb); + + ++struct bce_timestamp { ++ void __iomem *reg; ++ struct timer_list timer; ++ struct spinlock stop_sl; ++ bool stopped; ++}; + -+/* These functions define a symmetric 1:1 mapping between device numbers and -+ the bus and id. We have reserved the last id per bus for the host itself. */ -+static void devnum_to_bus_and_id(unsigned int devnum, unsigned int *bus, unsigned int *id) ++void bce_timestamp_init(struct bce_timestamp *ts, void __iomem *reg); ++ ++void bce_timestamp_start(struct bce_timestamp *ts, bool is_initial); ++ ++void bce_timestamp_stop(struct bce_timestamp *ts); ++ ++#endif //BCEDRIVER_MAILBOX_H +diff --git a/drivers/staging/apple-bce/queue.c b/drivers/staging/apple-bce/queue.c +new file mode 100644 +index 000000000000..bc9cd3bc6f0c +--- /dev/null ++++ b/drivers/staging/apple-bce/queue.c +@@ -0,0 +1,390 @@ ++#include "queue.h" ++#include "apple_bce.h" ++ ++#define REG_DOORBELL_BASE 0x44000 ++ ++struct bce_queue_cq *bce_alloc_cq(struct apple_bce_device *dev, int qid, u32 el_count) ++{ ++ struct bce_queue_cq *q; ++ q = kzalloc(sizeof(struct bce_queue_cq), GFP_KERNEL); ++ q->qid = qid; ++ q->type = BCE_QUEUE_CQ; ++ q->el_count = el_count; ++ q->data = dma_alloc_coherent(&dev->pci->dev, el_count * sizeof(struct bce_qe_completion), ++ &q->dma_handle, GFP_KERNEL); ++ if (!q->data) { ++ pr_err("DMA queue memory alloc failed\n"); ++ kfree(q); ++ return NULL; ++ } ++ return q; ++} ++ ++void bce_get_cq_memcfg(struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg) +{ -+ *bus = devnum / (VHBA_MAX_ID-1); -+ *id = devnum % (VHBA_MAX_ID-1); ++ cfg->qid = (u16) cq->qid; ++ cfg->el_count = (u16) cq->el_count; ++ cfg->vector_or_cq = 0; ++ cfg->_pad = 0; ++ cfg->addr = cq->dma_handle; ++ cfg->length = cq->el_count * sizeof(struct bce_qe_completion); +} + -+static unsigned int bus_and_id_to_devnum(unsigned int bus, unsigned int id) ++void bce_free_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq) +{ -+ return (bus * (VHBA_MAX_ID-1)) + id; ++ dma_free_coherent(&dev->pci->dev, cq->el_count * sizeof(struct bce_qe_completion), cq->data, cq->dma_handle); ++ kfree(cq); +} + -+static struct vhba_device *vhba_device_alloc (void) ++static void bce_handle_cq_completion(struct apple_bce_device *dev, struct bce_qe_completion *e, size_t *ce) +{ -+ struct vhba_device *vdev; ++ struct bce_queue *target; ++ struct bce_queue_sq *target_sq; ++ struct bce_sq_completion_data *cmpl; ++ if (e->qid >= BCE_MAX_QUEUE_COUNT) { ++ pr_err("Device sent a response for qid (%u) >= BCE_MAX_QUEUE_COUNT\n", e->qid); ++ return; ++ } ++ target = dev->queues[e->qid]; ++ if (!target || target->type != BCE_QUEUE_SQ) { ++ pr_err("Device sent a response for qid (%u), which does not exist\n", e->qid); ++ return; ++ } ++ target_sq = (struct bce_queue_sq *) target; ++ if (target_sq->completion_tail != e->completion_index) { ++ pr_err("Completion index mismatch; this is likely going to make this driver unusable\n"); ++ return; ++ } ++ if (!target_sq->has_pending_completions) { ++ target_sq->has_pending_completions = true; ++ dev->int_sq_list[(*ce)++] = target_sq; ++ } ++ cmpl = &target_sq->completion_data[e->completion_index]; ++ cmpl->status = e->status; ++ cmpl->data_size = e->data_size; ++ cmpl->result = e->result; ++ 
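++ /* Publish the copied status/data_size/result before completion_tail is
++  * advanced below; pairs with the rmb() in bce_next_completion(). */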
wmb(); ++ target_sq->completion_tail = (target_sq->completion_tail + 1) % target_sq->el_count; ++} ++ ++void bce_handle_cq_completions(struct apple_bce_device *dev, struct bce_queue_cq *cq) ++{ ++ size_t ce = 0; ++ struct bce_qe_completion *e; ++ struct bce_queue_sq *sq; ++ e = bce_cq_element(cq, cq->index); ++ if (!(e->flags & BCE_COMPLETION_FLAG_PENDING)) ++ return; ++ mb(); ++ while (true) { ++ e = bce_cq_element(cq, cq->index); ++ if (!(e->flags & BCE_COMPLETION_FLAG_PENDING)) ++ break; ++ // pr_info("apple-bce: compl: %i: %i %llx %llx", e->qid, e->status, e->data_size, e->result); ++ bce_handle_cq_completion(dev, e, &ce); ++ e->flags = 0; ++ cq->index = (cq->index + 1) % cq->el_count; ++ } ++ mb(); ++ iowrite32(cq->index, (u32 *) ((u8 *) dev->reg_mem_dma + REG_DOORBELL_BASE) + cq->qid); ++ while (ce) { ++ --ce; ++ sq = dev->int_sq_list[ce]; ++ sq->completion(sq); ++ sq->has_pending_completions = false; ++ } ++} + -+ vdev = kzalloc(sizeof(struct vhba_device), GFP_KERNEL); -+ if (!vdev) { ++ ++struct bce_queue_sq *bce_alloc_sq(struct apple_bce_device *dev, int qid, u32 el_size, u32 el_count, ++ bce_sq_completion compl, void *userdata) ++{ ++ struct bce_queue_sq *q; ++ q = kzalloc(sizeof(struct bce_queue_sq), GFP_KERNEL); ++ q->qid = qid; ++ q->type = BCE_QUEUE_SQ; ++ q->el_size = el_size; ++ q->el_count = el_count; ++ q->data = dma_alloc_coherent(&dev->pci->dev, el_count * el_size, ++ &q->dma_handle, GFP_KERNEL); ++ q->completion = compl; ++ q->userdata = userdata; ++ q->completion_data = kzalloc(sizeof(struct bce_sq_completion_data) * el_count, GFP_KERNEL); ++ q->reg_mem_dma = dev->reg_mem_dma; ++ atomic_set(&q->available_commands, el_count - 1); ++ init_completion(&q->available_command_completion); ++ atomic_set(&q->available_command_completion_waiting_count, 0); ++ if (!q->data) { ++ pr_err("DMA queue memory alloc failed\n"); ++ kfree(q); + return NULL; + } ++ return q; ++} + -+ spin_lock_init(&vdev->cmd_lock); -+ INIT_LIST_HEAD(&vdev->cmd_list); -+ init_waitqueue_head(&vdev->cmd_wq); -+ atomic_set(&vdev->refcnt, 1); -+ -+ vdev->kbuf = NULL; -+ vdev->kbuf_size = 0; ++void bce_get_sq_memcfg(struct bce_queue_sq *sq, struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg) ++{ ++ cfg->qid = (u16) sq->qid; ++ cfg->el_count = (u16) sq->el_count; ++ cfg->vector_or_cq = (u16) cq->qid; ++ cfg->_pad = 0; ++ cfg->addr = sq->dma_handle; ++ cfg->length = sq->el_count * sq->el_size; ++} + -+ return vdev; ++void bce_free_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq) ++{ ++ dma_free_coherent(&dev->pci->dev, sq->el_count * sq->el_size, sq->data, sq->dma_handle); ++ kfree(sq); +} + -+static void vhba_device_put (struct vhba_device *vdev) ++int bce_reserve_submission(struct bce_queue_sq *sq, unsigned long *timeout) +{ -+ if (atomic_dec_and_test(&vdev->refcnt)) { -+ kfree(vdev); ++ while (atomic_dec_if_positive(&sq->available_commands) < 0) { ++ if (!timeout || !*timeout) ++ return -EAGAIN; ++ atomic_inc(&sq->available_command_completion_waiting_count); ++ *timeout = wait_for_completion_timeout(&sq->available_command_completion, *timeout); ++ if (!*timeout) { ++ if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) < 0) ++ try_wait_for_completion(&sq->available_command_completion); /* consume the pending completion */ ++ } + } ++ return 0; +} + -+static struct vhba_device *vhba_device_get (struct vhba_device *vdev) ++void bce_cancel_submission_reservation(struct bce_queue_sq *sq) +{ -+ atomic_inc(&vdev->refcnt); -+ -+ return vdev; ++ atomic_inc(&sq->available_commands); 
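++ /* Hands back the credit taken by bce_reserve_submission(); unlike
++  * bce_notify_submission_complete() it does not wake a waiter blocked
++  * on available_command_completion. */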
+} + -+static int vhba_device_queue (struct vhba_device *vdev, struct scsi_cmnd *cmd) ++void *bce_next_submission(struct bce_queue_sq *sq) +{ -+ struct vhba_host *vhost; -+ struct vhba_command *vcmd; -+ unsigned long flags; ++ void *ret = bce_sq_element(sq, sq->tail); ++ sq->tail = (sq->tail + 1) % sq->el_count; ++ return ret; ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); ++void bce_submit_to_device(struct bce_queue_sq *sq) ++{ ++ mb(); ++ iowrite32(sq->tail, (u32 *) ((u8 *) sq->reg_mem_dma + REG_DOORBELL_BASE) + sq->qid); ++} + -+ vcmd = vhba_alloc_command(); -+ if (!vcmd) { -+ return SCSI_MLQUEUE_HOST_BUSY; ++void bce_notify_submission_complete(struct bce_queue_sq *sq) ++{ ++ sq->head = (sq->head + 1) % sq->el_count; ++ atomic_inc(&sq->available_commands); ++ if (atomic_dec_if_positive(&sq->available_command_completion_waiting_count) >= 0) { ++ complete(&sq->available_command_completion); + } ++} + -+ vcmd->cmd = cmd; ++void bce_set_submission_single(struct bce_qe_submission *element, dma_addr_t addr, size_t size) ++{ ++ element->addr = addr; ++ element->length = size; ++ element->segl_addr = element->segl_length = 0; ++} + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) -+ vcmd->metatag = scsi_cmd_to_rq(vcmd->cmd)->tag; -+#else -+ vcmd->metatag = vcmd->cmd->request->tag; -+#endif -+ list_add_tail(&vcmd->entry, &vdev->cmd_list); -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++static void bce_cmdq_completion(struct bce_queue_sq *q); + -+ wake_up_interruptible(&vdev->cmd_wq); ++struct bce_queue_cmdq *bce_alloc_cmdq(struct apple_bce_device *dev, int qid, u32 el_count) ++{ ++ struct bce_queue_cmdq *q; ++ q = kzalloc(sizeof(struct bce_queue_cmdq), GFP_KERNEL); ++ q->sq = bce_alloc_sq(dev, qid, BCE_CMD_SIZE, el_count, bce_cmdq_completion, q); ++ if (!q->sq) { ++ kfree(q); ++ return NULL; ++ } ++ spin_lock_init(&q->lck); ++ q->tres = kzalloc(sizeof(struct bce_queue_cmdq_result_el*) * el_count, GFP_KERNEL); ++ if (!q->tres) { ++ kfree(q); ++ return NULL; ++ } ++ return q; ++} + -+ return 0; ++void bce_free_cmdq(struct apple_bce_device *dev, struct bce_queue_cmdq *cmdq) ++{ ++ bce_free_sq(dev, cmdq->sq); ++ kfree(cmdq->tres); ++ kfree(cmdq); +} + -+static int vhba_device_dequeue (struct vhba_device *vdev, struct scsi_cmnd *cmd) ++void bce_cmdq_completion(struct bce_queue_sq *q) +{ -+ struct vhba_command *vcmd; -+ int retval; -+ unsigned long flags; ++ struct bce_queue_cmdq_result_el *el; ++ struct bce_queue_cmdq *cmdq = q->userdata; ++ struct bce_sq_completion_data *result; + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { -+ if (vcmd->cmd == cmd) { -+ list_del_init(&vcmd->entry); -+ break; ++ spin_lock(&cmdq->lck); ++ while ((result = bce_next_completion(q))) { ++ el = cmdq->tres[cmdq->sq->head]; ++ if (el) { ++ el->result = result->result; ++ el->status = result->status; ++ mb(); ++ complete(&el->cmpl); ++ } else { ++ pr_err("apple-bce: Unexpected command queue completion\n"); + } ++ cmdq->tres[cmdq->sq->head] = NULL; ++ bce_notify_submission_complete(q); + } ++ spin_unlock(&cmdq->lck); ++} + -+ /* command not found */ -+ if (&vcmd->entry == &vdev->cmd_list) { -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); -+ return SUCCESS; ++static __always_inline void *bce_cmd_start(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res) ++{ ++ void *ret; ++ unsigned long timeout; ++ init_completion(&res->cmpl); ++ mb(); ++ ++ timeout = msecs_to_jiffies(1000L * 60 * 5); 
/* wait for up to ~5 minutes */ ++ if (bce_reserve_submission(cmdq->sq, &timeout)) ++ return NULL; ++ ++ spin_lock(&cmdq->lck); ++ cmdq->tres[cmdq->sq->tail] = res; ++ ret = bce_next_submission(cmdq->sq); ++ return ret; ++} ++ ++static __always_inline void bce_cmd_finish(struct bce_queue_cmdq *cmdq, struct bce_queue_cmdq_result_el *res) ++{ ++ bce_submit_to_device(cmdq->sq); ++ spin_unlock(&cmdq->lck); ++ ++ wait_for_completion(&res->cmpl); ++ mb(); ++} ++ ++u32 bce_cmd_register_queue(struct bce_queue_cmdq *cmdq, struct bce_queue_memcfg *cfg, const char *name, bool isdirout) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_register_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_REGISTER_MEMORY_QUEUE; ++ cmd->flags = (u16) ((name ? 2 : 0) | (isdirout ? 1 : 0)); ++ cmd->qid = cfg->qid; ++ cmd->el_count = cfg->el_count; ++ cmd->vector_or_cq = cfg->vector_or_cq; ++ memset(cmd->name, 0, sizeof(cmd->name)); ++ if (name) { ++ cmd->name_len = (u16) min(strlen(name), (size_t) sizeof(cmd->name)); ++ memcpy(cmd->name, name, cmd->name_len); ++ } else { ++ cmd->name_len = 0; + } ++ cmd->addr = cfg->addr; ++ cmd->length = cfg->length; + -+ while (vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING) { -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); -+ scmd_dbg(cmd, "wait for I/O before aborting\n"); -+ schedule_timeout(1); -+ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++u32 bce_cmd_unregister_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_simple_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_UNREGISTER_MEMORY_QUEUE; ++ cmd->flags = 0; ++ cmd->qid = qid; ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++u32 bce_cmd_flush_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid) ++{ ++ struct bce_queue_cmdq_result_el res; ++ struct bce_cmdq_simple_memory_queue_cmd *cmd = bce_cmd_start(cmdq, &res); ++ if (!cmd) ++ return (u32) -1; ++ cmd->cmd = BCE_CMD_FLUSH_MEMORY_QUEUE; ++ cmd->flags = 0; ++ cmd->qid = qid; ++ bce_cmd_finish(cmdq, &res); ++ return res.status; ++} ++ ++ ++struct bce_queue_cq *bce_create_cq(struct apple_bce_device *dev, u32 el_count) ++{ ++ struct bce_queue_cq *cq; ++ struct bce_queue_memcfg cfg; ++ int qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL); ++ if (qid < 0) ++ return NULL; ++ cq = bce_alloc_cq(dev, qid, el_count); ++ if (!cq) ++ return NULL; ++ bce_get_cq_memcfg(cq, &cfg); ++ if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, NULL, false) != 0) { ++ pr_err("apple-bce: CQ registration failed (%i)", qid); ++ bce_free_cq(dev, cq); ++ ida_simple_remove(&dev->queue_ida, (uint) qid); ++ return NULL; ++ } ++ dev->queues[qid] = (struct bce_queue *) cq; ++ return cq; ++} ++ ++struct bce_queue_sq *bce_create_sq(struct apple_bce_device *dev, struct bce_queue_cq *cq, const char *name, u32 el_count, ++ int direction, bce_sq_completion compl, void *userdata) ++{ ++ struct bce_queue_sq *sq; ++ struct bce_queue_memcfg cfg; ++ int qid; ++ if (cq == NULL) ++ return NULL; /* cq can not be null */ ++ if (name == NULL) ++ return NULL; /* name can not be null */ ++ if (direction != DMA_TO_DEVICE && direction != DMA_FROM_DEVICE) ++ return NULL; /* unsupported direction */ ++ qid = ida_simple_get(&dev->queue_ida, BCE_QUEUE_USER_MIN, BCE_QUEUE_USER_MAX, GFP_KERNEL); ++ if (qid < 0) ++ 
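++ /* No free queue ID left in the BCE_QUEUE_USER_MIN..BCE_QUEUE_USER_MAX range */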
return NULL; ++ sq = bce_alloc_sq(dev, qid, sizeof(struct bce_qe_submission), el_count, compl, userdata); ++ if (!sq) ++ return NULL; ++ bce_get_sq_memcfg(sq, cq, &cfg); ++ if (bce_cmd_register_queue(dev->cmd_cmdq, &cfg, name, direction != DMA_FROM_DEVICE) != 0) { ++ pr_err("apple-bce: SQ registration failed (%i)", qid); ++ bce_free_sq(dev, sq); ++ ida_simple_remove(&dev->queue_ida, (uint) qid); ++ return NULL; + } ++ spin_lock(&dev->queues_lock); ++ dev->queues[qid] = (struct bce_queue *) sq; ++ spin_unlock(&dev->queues_lock); ++ return sq; ++} + -+ retval = (vcmd->status == VHBA_REQ_SENT) ? FAILED : SUCCESS; ++void bce_destroy_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq) ++{ ++ if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) cq->qid)) ++ pr_err("apple-bce: CQ unregister failed"); ++ spin_lock(&dev->queues_lock); ++ dev->queues[cq->qid] = NULL; ++ spin_unlock(&dev->queues_lock); ++ ida_simple_remove(&dev->queue_ida, (uint) cq->qid); ++ bce_free_cq(dev, cq); ++} + -+ vhba_free_command(vcmd); ++void bce_destroy_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq) ++{ ++ if (!dev->is_being_removed && bce_cmd_unregister_memory_queue(dev->cmd_cmdq, (u16) sq->qid)) ++ pr_err("apple-bce: CQ unregister failed"); ++ spin_lock(&dev->queues_lock); ++ dev->queues[sq->qid] = NULL; ++ spin_unlock(&dev->queues_lock); ++ ida_simple_remove(&dev->queue_ida, (uint) sq->qid); ++ bce_free_sq(dev, sq); ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/queue.h b/drivers/staging/apple-bce/queue.h +new file mode 100644 +index 000000000000..8368ac5dfca8 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue.h +@@ -0,0 +1,177 @@ ++#ifndef BCE_QUEUE_H ++#define BCE_QUEUE_H + -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++#include ++#include + -+ return retval; ++#define BCE_CMD_SIZE 0x40 ++ ++struct apple_bce_device; ++ ++enum bce_queue_type { ++ BCE_QUEUE_CQ, BCE_QUEUE_SQ ++}; ++struct bce_queue { ++ int qid; ++ int type; ++}; ++struct bce_queue_cq { ++ int qid; ++ int type; ++ u32 el_count; ++ dma_addr_t dma_handle; ++ void *data; ++ ++ u32 index; ++}; ++struct bce_queue_sq; ++typedef void (*bce_sq_completion)(struct bce_queue_sq *q); ++struct bce_sq_completion_data { ++ u32 status; ++ u64 data_size; ++ u64 result; ++}; ++struct bce_queue_sq { ++ int qid; ++ int type; ++ u32 el_size; ++ u32 el_count; ++ dma_addr_t dma_handle; ++ void *data; ++ void *userdata; ++ void __iomem *reg_mem_dma; ++ ++ atomic_t available_commands; ++ struct completion available_command_completion; ++ atomic_t available_command_completion_waiting_count; ++ u32 head, tail; ++ ++ u32 completion_cidx, completion_tail; ++ struct bce_sq_completion_data *completion_data; ++ bool has_pending_completions; ++ bce_sq_completion completion; ++}; ++ ++struct bce_queue_cmdq_result_el { ++ struct completion cmpl; ++ u32 status; ++ u64 result; ++}; ++struct bce_queue_cmdq { ++ struct bce_queue_sq *sq; ++ struct spinlock lck; ++ struct bce_queue_cmdq_result_el **tres; ++}; ++ ++struct bce_queue_memcfg { ++ u16 qid; ++ u16 el_count; ++ u16 vector_or_cq; ++ u16 _pad; ++ u64 addr; ++ u64 length; ++}; ++ ++enum bce_qe_completion_status { ++ BCE_COMPLETION_SUCCESS = 0, ++ BCE_COMPLETION_ERROR = 1, ++ BCE_COMPLETION_ABORTED = 2, ++ BCE_COMPLETION_NO_SPACE = 3, ++ BCE_COMPLETION_OVERRUN = 4 ++}; ++enum bce_qe_completion_flags { ++ BCE_COMPLETION_FLAG_PENDING = 0x8000 ++}; ++struct bce_qe_completion { ++ u64 result; ++ u64 data_size; ++ u16 qid; ++ u16 completion_index; ++ u16 
status; // bce_qe_completion_status ++ u16 flags; // bce_qe_completion_flags ++}; ++ ++struct bce_qe_submission { ++ u64 length; ++ u64 addr; ++ ++ u64 segl_addr; ++ u64 segl_length; ++}; ++ ++enum bce_cmdq_command { ++ BCE_CMD_REGISTER_MEMORY_QUEUE = 0x20, ++ BCE_CMD_UNREGISTER_MEMORY_QUEUE = 0x30, ++ BCE_CMD_FLUSH_MEMORY_QUEUE = 0x40, ++ BCE_CMD_SET_MEMORY_QUEUE_PROPERTY = 0x50 ++}; ++struct bce_cmdq_simple_memory_queue_cmd { ++ u16 cmd; // bce_cmdq_command ++ u16 flags; ++ u16 qid; ++}; ++struct bce_cmdq_register_memory_queue_cmd { ++ u16 cmd; // bce_cmdq_command ++ u16 flags; ++ u16 qid; ++ u16 _pad; ++ u16 el_count; ++ u16 vector_or_cq; ++ u16 _pad2; ++ u16 name_len; ++ char name[0x20]; ++ u64 addr; ++ u64 length; ++}; ++ ++static __always_inline void *bce_sq_element(struct bce_queue_sq *q, int i) { ++ return (void *) ((u8 *) q->data + q->el_size * i); ++} ++static __always_inline void *bce_cq_element(struct bce_queue_cq *q, int i) { ++ return (void *) ((struct bce_qe_completion *) q->data + i); +} + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) -+static int vhba_slave_alloc(struct scsi_device *sdev) ++static __always_inline struct bce_sq_completion_data *bce_next_completion(struct bce_queue_sq *sq) { ++ struct bce_sq_completion_data *res; ++ rmb(); ++ if (sq->completion_cidx == sq->completion_tail) ++ return NULL; ++ res = &sq->completion_data[sq->completion_cidx]; ++ sq->completion_cidx = (sq->completion_cidx + 1) % sq->el_count; ++ return res; ++} ++ ++struct bce_queue_cq *bce_alloc_cq(struct apple_bce_device *dev, int qid, u32 el_count); ++void bce_get_cq_memcfg(struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg); ++void bce_free_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++void bce_handle_cq_completions(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++ ++struct bce_queue_sq *bce_alloc_sq(struct apple_bce_device *dev, int qid, u32 el_size, u32 el_count, ++ bce_sq_completion compl, void *userdata); ++void bce_get_sq_memcfg(struct bce_queue_sq *sq, struct bce_queue_cq *cq, struct bce_queue_memcfg *cfg); ++void bce_free_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq); ++int bce_reserve_submission(struct bce_queue_sq *sq, unsigned long *timeout); ++void bce_cancel_submission_reservation(struct bce_queue_sq *sq); ++void *bce_next_submission(struct bce_queue_sq *sq); ++void bce_submit_to_device(struct bce_queue_sq *sq); ++void bce_notify_submission_complete(struct bce_queue_sq *sq); ++ ++void bce_set_submission_single(struct bce_qe_submission *element, dma_addr_t addr, size_t size); ++ ++struct bce_queue_cmdq *bce_alloc_cmdq(struct apple_bce_device *dev, int qid, u32 el_count); ++void bce_free_cmdq(struct apple_bce_device *dev, struct bce_queue_cmdq *cmdq); ++ ++u32 bce_cmd_register_queue(struct bce_queue_cmdq *cmdq, struct bce_queue_memcfg *cfg, const char *name, bool isdirout); ++u32 bce_cmd_unregister_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid); ++u32 bce_cmd_flush_memory_queue(struct bce_queue_cmdq *cmdq, u16 qid); ++ ++ ++/* User API - Creates and registers the queue */ ++ ++struct bce_queue_cq *bce_create_cq(struct apple_bce_device *dev, u32 el_count); ++struct bce_queue_sq *bce_create_sq(struct apple_bce_device *dev, struct bce_queue_cq *cq, const char *name, u32 el_count, ++ int direction, bce_sq_completion compl, void *userdata); ++void bce_destroy_cq(struct apple_bce_device *dev, struct bce_queue_cq *cq); ++void bce_destroy_sq(struct apple_bce_device *dev, struct bce_queue_sq *sq); ++ ++#endif //BCEDRIVER_MAILBOX_H +diff --git 
a/drivers/staging/apple-bce/queue_dma.c b/drivers/staging/apple-bce/queue_dma.c
+new file mode 100644
+index 000000000000..b236613285c0
+--- /dev/null
++++ b/drivers/staging/apple-bce/queue_dma.c
+@@ -0,0 +1,220 @@
++#include "queue_dma.h"
++#include 
++#include 
++#include "queue.h"
++
++static int bce_alloc_scatterlist_from_vm(struct sg_table *tbl, void *data, size_t len);
++static struct bce_segment_list_element_hostinfo *bce_map_segment_list(
++ struct device *dev, struct scatterlist *pages, int pagen);
++static void bce_unmap_segement_list(struct device *dev, struct bce_segment_list_element_hostinfo *list);
++
++int bce_map_dma_buffer(struct device *dev, struct bce_dma_buffer *buf, struct sg_table scatterlist,
++ enum dma_data_direction dir)
++{
++ int cnt;
++
++ buf->direction = dir;
++ buf->scatterlist = scatterlist;
++ buf->seglist_hostinfo = NULL;
++
++ cnt = dma_map_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir);
++ if (cnt != buf->scatterlist.nents) {
++ pr_err("apple-bce: DMA scatter list mapping returned an unexpected count: %i\n", cnt);
++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir);
++ return -EIO;
++ }
++ if (cnt == 1)
++ return 0;
++
++ buf->seglist_hostinfo = bce_map_segment_list(dev, buf->scatterlist.sgl, buf->scatterlist.nents);
++ if (!buf->seglist_hostinfo) {
++ pr_err("apple-bce: Creating segment list failed\n");
++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, dir);
++ return -EIO;
++ }
++ return 0;
++}
++
++int bce_map_dma_buffer_vm(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len,
++ enum dma_data_direction dir)
++{
++ int status;
++ struct sg_table scatterlist;
++ if ((status = bce_alloc_scatterlist_from_vm(&scatterlist, data, len)))
++ return status;
++ if ((status = bce_map_dma_buffer(dev, buf, scatterlist, dir))) {
++ sg_free_table(&scatterlist);
++ return status;
++ }
++ return 0;
++}
++
++int bce_map_dma_buffer_km(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len,
++ enum dma_data_direction dir)
++{
++ /* Kernel memory is contiguous, which is great for us.
*/ ++ int status; ++ struct sg_table scatterlist; ++ if ((status = sg_alloc_table(&scatterlist, 1, GFP_KERNEL))) { ++ sg_free_table(&scatterlist); ++ return status; ++ } ++ sg_set_buf(scatterlist.sgl, data, (uint) len); ++ if ((status = bce_map_dma_buffer(dev, buf, scatterlist, dir))) { ++ sg_free_table(&scatterlist); ++ return status; + } ++ return 0; +} + -+static void vhba_scan_devices (struct work_struct *work) ++void bce_unmap_dma_buffer(struct device *dev, struct bce_dma_buffer *buf) +{ -+ struct vhba_host *vhost = container_of(work, struct vhba_host, scan_devices); -+ unsigned long flags; -+ int change, exists; -+ unsigned int devnum; -+ unsigned int bus, id; ++ dma_unmap_sg(dev, buf->scatterlist.sgl, buf->scatterlist.nents, buf->direction); ++ bce_unmap_segement_list(dev, buf->seglist_hostinfo); ++} + -+ for (;;) { -+ spin_lock_irqsave(&vhost->dev_lock, flags); + -+ devnum = find_first_bit(vhost->chgmap, VHBA_MAX_DEVICES); -+ if (devnum >= VHBA_MAX_DEVICES) { -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); -+ break; -+ } -+ change = vhost->chgtype[devnum]; -+ exists = vhost->devices[devnum] != NULL; ++static int bce_alloc_scatterlist_from_vm(struct sg_table *tbl, void *data, size_t len) ++{ ++ int status, i; ++ struct page **pages; ++ size_t off, start_page, end_page, page_count; ++ off = (size_t) data % PAGE_SIZE; ++ start_page = (size_t) data / PAGE_SIZE; ++ end_page = ((size_t) data + len - 1) / PAGE_SIZE; ++ page_count = end_page - start_page + 1; + -+ vhost->chgtype[devnum] = 0; -+ clear_bit(devnum, vhost->chgmap); ++ if (page_count > PAGE_SIZE / sizeof(struct page *)) ++ pages = vmalloc(page_count * sizeof(struct page *)); ++ else ++ pages = kmalloc(page_count * sizeof(struct page *), GFP_KERNEL); + -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ for (i = 0; i < page_count; i++) ++ pages[i] = vmalloc_to_page((void *) ((start_page + i) * PAGE_SIZE)); + -+ devnum_to_bus_and_id(devnum, &bus, &id); ++ if ((status = sg_alloc_table_from_pages(tbl, pages, page_count, (unsigned int) off, len, GFP_KERNEL))) { ++ sg_free_table(tbl); ++ } + -+ if (change < 0) { -+ dev_dbg(&vhost->shost->shost_gendev, "trying to remove target %d:%d:0\n", bus, id); -+ vhba_scan_devices_remove(vhost, bus, id); -+ } else if (change > 0) { -+ dev_dbg(&vhost->shost->shost_gendev, "trying to add target %d:%d:0\n", bus, id); -+ vhba_scan_devices_add(vhost, bus, id); -+ } else { -+ /* quick sequence of add/remove or remove/add; we determine -+ which one it was by checking if device structure exists */ -+ if (exists) { -+ /* remove followed by add: remove and (re)add */ -+ dev_dbg(&vhost->shost->shost_gendev, "trying to (re)add target %d:%d:0\n", bus, id); -+ vhba_scan_devices_remove(vhost, bus, id); -+ vhba_scan_devices_add(vhost, bus, id); ++ if (page_count > PAGE_SIZE / sizeof(struct page *)) ++ vfree(pages); ++ else ++ kfree(pages); ++ return status; ++} ++ ++#define BCE_ELEMENTS_PER_PAGE ((PAGE_SIZE - sizeof(struct bce_segment_list_header)) \ ++ / sizeof(struct bce_segment_list_element)) ++#define BCE_ELEMENTS_PER_ADDITIONAL_PAGE (PAGE_SIZE / sizeof(struct bce_segment_list_element)) ++ ++static struct bce_segment_list_element_hostinfo *bce_map_segment_list( ++ struct device *dev, struct scatterlist *pages, int pagen) ++{ ++ size_t ptr, pptr = 0; ++ struct bce_segment_list_header theader; /* a temp header, to store the initial seg */ ++ struct bce_segment_list_header *header; ++ struct bce_segment_list_element *el, *el_end; ++ struct bce_segment_list_element_hostinfo *out, *pout, *out_root; ++ 
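++ /* Strategy: fill page-sized chunks with { addr, length } elements, each
++  * chunk led by a bce_segment_list_header; physically consecutive page
++  * allocations extend the current chunk, and separate chunks are linked
++  * through next_segl_addr/next_segl_length in the DMA-mapping pass below. */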
struct scatterlist *sg; ++ int i; ++ header = &theader; ++ out = out_root = NULL; ++ el = el_end = NULL; ++ for_each_sg(pages, sg, pagen, i) { ++ if (el >= el_end) { ++ /* allocate a new page, this will be also done for the first element */ ++ ptr = __get_free_page(GFP_KERNEL); ++ if (pptr && ptr == pptr + PAGE_SIZE) { ++ out->page_count++; ++ header->element_count += BCE_ELEMENTS_PER_ADDITIONAL_PAGE; ++ el_end += BCE_ELEMENTS_PER_ADDITIONAL_PAGE; + } else { -+ /* add followed by remove: no-op */ -+ dev_dbg(&vhost->shost->shost_gendev, "no-op for target %d:%d:0\n", bus, id); ++ header = (void *) ptr; ++ header->element_count = BCE_ELEMENTS_PER_PAGE; ++ header->data_size = 0; ++ header->next_segl_addr = 0; ++ header->next_segl_length = 0; ++ el = (void *) (header + 1); ++ el_end = el + BCE_ELEMENTS_PER_PAGE; ++ ++ if (out) { ++ out->next = kmalloc(sizeof(struct bce_segment_list_element_hostinfo), GFP_KERNEL); ++ out = out->next; ++ } else { ++ out_root = out = kmalloc(sizeof(struct bce_segment_list_element_hostinfo), GFP_KERNEL); ++ } ++ out->page_start = (void *) ptr; ++ out->page_count = 1; ++ out->dma_start = DMA_MAPPING_ERROR; ++ out->next = NULL; + } ++ pptr = ptr; ++ } ++ el->addr = sg->dma_address; ++ el->length = sg->length; ++ header->data_size += el->length; ++ } ++ ++ /* DMA map */ ++ out = out_root; ++ pout = NULL; ++ while (out) { ++ out->dma_start = dma_map_single(dev, out->page_start, out->page_count * PAGE_SIZE, DMA_TO_DEVICE); ++ if (dma_mapping_error(dev, out->dma_start)) ++ goto error; ++ if (pout) { ++ header = pout->page_start; ++ header->next_segl_addr = out->dma_start; ++ header->next_segl_length = out->page_count * PAGE_SIZE; + } ++ pout = out; ++ out = out->next; ++ } ++ return out_root; ++ ++ error: ++ bce_unmap_segement_list(dev, out_root); ++ return NULL; ++} ++ ++static void bce_unmap_segement_list(struct device *dev, struct bce_segment_list_element_hostinfo *list) ++{ ++ struct bce_segment_list_element_hostinfo *next; ++ while (list) { ++ if (list->dma_start != DMA_MAPPING_ERROR) ++ dma_unmap_single(dev, list->dma_start, list->page_count * PAGE_SIZE, DMA_TO_DEVICE); ++ next = list->next; ++ kfree(list); ++ list = next; ++ } ++} ++ ++int bce_set_submission_buf(struct bce_qe_submission *element, struct bce_dma_buffer *buf, size_t offset, size_t length) ++{ ++ struct bce_segment_list_element_hostinfo *seg; ++ struct bce_segment_list_header *seg_header; ++ ++ seg = buf->seglist_hostinfo; ++ if (!seg) { ++ element->addr = buf->scatterlist.sgl->dma_address + offset; ++ element->length = length; ++ element->segl_addr = 0; ++ element->segl_length = 0; ++ return 0; ++ } ++ ++ while (seg) { ++ seg_header = seg->page_start; ++ if (offset <= seg_header->data_size) ++ break; ++ offset -= seg_header->data_size; ++ seg = seg->next; + } ++ if (!seg) ++ return -EINVAL; ++ element->addr = offset; ++ element->length = buf->scatterlist.sgl->dma_length; ++ element->segl_addr = seg->dma_start; ++ element->segl_length = seg->page_count * PAGE_SIZE; ++ return 0; ++} +\ No newline at end of file +diff --git a/drivers/staging/apple-bce/queue_dma.h b/drivers/staging/apple-bce/queue_dma.h +new file mode 100644 +index 000000000000..f8a57e50e7a3 +--- /dev/null ++++ b/drivers/staging/apple-bce/queue_dma.h +@@ -0,0 +1,50 @@ ++#ifndef BCE_QUEUE_DMA_H ++#define BCE_QUEUE_DMA_H ++ ++#include ++ ++struct bce_qe_submission; ++ ++struct bce_segment_list_header { ++ u64 element_count; ++ u64 data_size; ++ ++ u64 next_segl_addr; ++ u64 next_segl_length; ++}; ++struct bce_segment_list_element { ++ u64 
addr; ++ u64 length; ++}; ++ ++struct bce_segment_list_element_hostinfo { ++ struct bce_segment_list_element_hostinfo *next; ++ void *page_start; ++ size_t page_count; ++ dma_addr_t dma_start; ++}; ++ ++ ++struct bce_dma_buffer { ++ enum dma_data_direction direction; ++ struct sg_table scatterlist; ++ struct bce_segment_list_element_hostinfo *seglist_hostinfo; ++}; ++ ++/* NOTE: Takes ownership of the sg_table if it succeeds. Ownership is not transferred on failure. */ ++int bce_map_dma_buffer(struct device *dev, struct bce_dma_buffer *buf, struct sg_table scatterlist, ++ enum dma_data_direction dir); ++ ++/* Creates a buffer from virtual memory (vmalloc) */ ++int bce_map_dma_buffer_vm(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir); ++ ++/* Creates a buffer from kernel memory (kmalloc) */ ++int bce_map_dma_buffer_km(struct device *dev, struct bce_dma_buffer *buf, void *data, size_t len, ++ enum dma_data_direction dir); ++ ++void bce_unmap_dma_buffer(struct device *dev, struct bce_dma_buffer *buf); ++ ++int bce_set_submission_buf(struct bce_qe_submission *element, struct bce_dma_buffer *buf, size_t offset, size_t length); ++ ++#endif //BCE_QUEUE_DMA_H +diff --git a/drivers/staging/apple-bce/vhci/command.h b/drivers/staging/apple-bce/vhci/command.h +new file mode 100644 +index 000000000000..26619e0bccfa +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/command.h +@@ -0,0 +1,204 @@ ++#ifndef BCE_VHCI_COMMAND_H ++#define BCE_VHCI_COMMAND_H ++ ++#include "queue.h" ++#include ++#include ++ ++#define BCE_VHCI_CMD_TIMEOUT_SHORT msecs_to_jiffies(2000) ++#define BCE_VHCI_CMD_TIMEOUT_LONG msecs_to_jiffies(30000) ++ ++#define BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2 2 ++#define BCE_VHCI_BULK_MAX_ACTIVE_URBS (1 << BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2) ++ ++typedef u8 bce_vhci_port_t; ++typedef u8 bce_vhci_device_t; ++ ++enum bce_vhci_command { ++ BCE_VHCI_CMD_CONTROLLER_ENABLE = 1, ++ BCE_VHCI_CMD_CONTROLLER_DISABLE = 2, ++ BCE_VHCI_CMD_CONTROLLER_START = 3, ++ BCE_VHCI_CMD_CONTROLLER_PAUSE = 4, ++ ++ BCE_VHCI_CMD_PORT_POWER_ON = 0x10, ++ BCE_VHCI_CMD_PORT_POWER_OFF = 0x11, ++ BCE_VHCI_CMD_PORT_RESUME = 0x12, ++ BCE_VHCI_CMD_PORT_SUSPEND = 0x13, ++ BCE_VHCI_CMD_PORT_RESET = 0x14, ++ BCE_VHCI_CMD_PORT_DISABLE = 0x15, ++ BCE_VHCI_CMD_PORT_STATUS = 0x16, ++ ++ BCE_VHCI_CMD_DEVICE_CREATE = 0x30, ++ BCE_VHCI_CMD_DEVICE_DESTROY = 0x31, ++ ++ BCE_VHCI_CMD_ENDPOINT_CREATE = 0x40, ++ BCE_VHCI_CMD_ENDPOINT_DESTROY = 0x41, ++ BCE_VHCI_CMD_ENDPOINT_SET_STATE = 0x42, ++ BCE_VHCI_CMD_ENDPOINT_RESET = 0x44, ++ ++ /* Device to host only */ ++ BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE = 0x43, ++ BCE_VHCI_CMD_TRANSFER_REQUEST = 0x1000, ++ BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS = 0x1005 ++}; ++ ++enum bce_vhci_endpoint_state { ++ BCE_VHCI_ENDPOINT_ACTIVE = 0, ++ BCE_VHCI_ENDPOINT_PAUSED = 1, ++ BCE_VHCI_ENDPOINT_STALLED = 2 ++}; ++ ++static inline int bce_vhci_cmd_controller_enable(struct bce_vhci_command_queue *q, u8 busNum, u16 *portMask) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_ENABLE; ++ cmd.param1 = 0x7100u | busNum; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++ if (!status) ++ *portMask = (u16) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_controller_disable(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_DISABLE; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, 
BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_controller_start(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_START; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_controller_pause(struct bce_vhci_command_queue *q) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_CONTROLLER_PAUSE; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++ ++static inline int bce_vhci_cmd_port_power_on(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_POWER_ON; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_power_off(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_POWER_OFF; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_resume(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_RESUME; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_port_suspend(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_SUSPEND; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++static inline int bce_vhci_cmd_port_reset(struct bce_vhci_command_queue *q, bce_vhci_port_t port, u32 timeout) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_RESET; ++ cmd.param1 = port; ++ cmd.param2 = timeout; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_disable(struct bce_vhci_command_queue *q, bce_vhci_port_t port) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_DISABLE; ++ cmd.param1 = port; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_port_status(struct bce_vhci_command_queue *q, bce_vhci_port_t port, ++ u32 clearFlags, u32 *resStatus) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_PORT_STATUS; ++ cmd.param1 = port; ++ cmd.param2 = clearFlags & 0x560000; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (status >= 0) ++ *resStatus = (u32) res.param2; ++ return status; +} + -+static int vhba_add_device (struct vhba_device *vdev) ++static inline int bce_vhci_cmd_device_create(struct bce_vhci_command_queue *q, bce_vhci_port_t port, ++ bce_vhci_device_t *dev) +{ -+ struct vhba_host *vhost; -+ unsigned int devnum; -+ unsigned long flags; ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_DEVICE_CREATE; ++ cmd.param1 = port; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (!status) ++ *dev = (bce_vhci_device_t) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_device_destroy(struct bce_vhci_command_queue *q, bce_vhci_device_t dev) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_DEVICE_DESTROY; ++ cmd.param1 
= dev; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_LONG); ++} ++ ++static inline int bce_vhci_cmd_endpoint_create(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, ++ struct usb_endpoint_descriptor *desc) ++{ ++ struct bce_vhci_message cmd, res; ++ int endpoint_type = usb_endpoint_type(desc); ++ int maxp = usb_endpoint_maxp(desc); ++ int maxp_burst = usb_endpoint_maxp_mult(desc) * maxp; ++ u8 max_active_requests_pow2 = 0; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_CREATE; ++ cmd.param1 = dev | ((desc->bEndpointAddress & 0x8Fu) << 8); ++ if (endpoint_type == USB_ENDPOINT_XFER_BULK) ++ max_active_requests_pow2 = BCE_VHCI_BULK_MAX_ACTIVE_URBS_POW2; ++ cmd.param2 = endpoint_type | ((max_active_requests_pow2 & 0xf) << 4) | (maxp << 16) | ((u64) maxp_burst << 32); ++ if (endpoint_type == USB_ENDPOINT_XFER_INT) ++ cmd.param2 |= (desc->bInterval - 1) << 8; ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_endpoint_destroy(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_DESTROY; ++ cmd.param1 = dev | (endpoint << 8); ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} ++static inline int bce_vhci_cmd_endpoint_set_state(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint, ++ enum bce_vhci_endpoint_state newState, enum bce_vhci_endpoint_state *retState) ++{ ++ int status; ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_SET_STATE; ++ cmd.param1 = dev | (endpoint << 8); ++ cmd.param2 = (u64) newState; ++ status = bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++ if (status != BCE_VHCI_INTERNAL_ERROR && status != BCE_VHCI_NO_POWER) ++ *retState = (enum bce_vhci_endpoint_state) res.param2; ++ return status; ++} ++static inline int bce_vhci_cmd_endpoint_reset(struct bce_vhci_command_queue *q, bce_vhci_device_t dev, u8 endpoint) ++{ ++ struct bce_vhci_message cmd, res; ++ cmd.cmd = BCE_VHCI_CMD_ENDPOINT_RESET; ++ cmd.param1 = dev | (endpoint << 8); ++ return bce_vhci_command_queue_execute(q, &cmd, &res, BCE_VHCI_CMD_TIMEOUT_SHORT); ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); + -+ vhba_device_get(vdev); ++#endif //BCE_VHCI_COMMAND_H +diff --git a/drivers/staging/apple-bce/vhci/queue.c b/drivers/staging/apple-bce/vhci/queue.c +new file mode 100644 +index 000000000000..7b0b5027157b +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/queue.c +@@ -0,0 +1,268 @@ ++#include "queue.h" ++#include "vhci.h" ++#include "../apple_bce.h" + -+ spin_lock_irqsave(&vhost->dev_lock, flags); -+ if (vhost->num_devices >= VHBA_MAX_DEVICES) { -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); -+ vhba_device_put(vdev); -+ return -EBUSY; -+ } + -+ for (devnum = 0; devnum < VHBA_MAX_DEVICES; devnum++) { -+ if (vhost->devices[devnum] == NULL) { -+ vdev->num = devnum; -+ vhost->devices[devnum] = vdev; -+ vhost->num_devices++; -+ set_bit(devnum, vhost->chgmap); -+ vhost->chgtype[devnum]++; -+ break; -+ } ++static void bce_vhci_message_queue_completion(struct bce_queue_sq *sq); ++ ++int bce_vhci_message_queue_create(struct bce_vhci *vhci, struct bce_vhci_message_queue *ret, const char *name) ++{ ++ int status; ++ ret->cq = bce_create_cq(vhci->dev, VHCI_EVENT_QUEUE_EL_COUNT); ++ if (!ret->cq) ++ return -EINVAL; ++ ret->sq = bce_create_sq(vhci->dev, ret->cq, name, VHCI_EVENT_QUEUE_EL_COUNT, DMA_TO_DEVICE, ++ 
bce_vhci_message_queue_completion, ret); ++ if (!ret->sq) { ++ status = -EINVAL; ++ goto fail_cq; + } -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ ret->data = dma_alloc_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ &ret->dma_addr, GFP_KERNEL); ++ if (!ret->data) { ++ status = -EINVAL; ++ goto fail_sq; ++ } ++ return 0; + -+ schedule_work(&vhost->scan_devices); ++fail_sq: ++ bce_destroy_sq(vhci->dev, ret->sq); ++ ret->sq = NULL; ++fail_cq: ++ bce_destroy_cq(vhci->dev, ret->cq); ++ ret->cq = NULL; ++ return status; ++} + -+ return 0; ++void bce_vhci_message_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_message_queue *q) ++{ ++ if (!q->cq) ++ return; ++ dma_free_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ q->data, q->dma_addr); ++ bce_destroy_sq(vhci->dev, q->sq); ++ bce_destroy_cq(vhci->dev, q->cq); +} + -+static int vhba_remove_device (struct vhba_device *vdev) ++void bce_vhci_message_queue_write(struct bce_vhci_message_queue *q, struct bce_vhci_message *req) +{ -+ struct vhba_host *vhost; -+ unsigned long flags; ++ int sidx; ++ struct bce_qe_submission *s; ++ sidx = q->sq->tail; ++ s = bce_next_submission(q->sq); ++ pr_debug("bce-vhci: Send message: %x s=%x p1=%x p2=%llx\n", req->cmd, req->status, req->param1, req->param2); ++ q->data[sidx] = *req; ++ bce_set_submission_single(s, q->dma_addr + sizeof(struct bce_vhci_message) * sidx, ++ sizeof(struct bce_vhci_message)); ++ bce_submit_to_device(q->sq); ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); ++static void bce_vhci_message_queue_completion(struct bce_queue_sq *sq) ++{ ++ while (bce_next_completion(sq)) ++ bce_notify_submission_complete(sq); ++} + -+ spin_lock_irqsave(&vhost->dev_lock, flags); -+ set_bit(vdev->num, vhost->chgmap); -+ vhost->chgtype[vdev->num]--; -+ vhost->devices[vdev->num] = NULL; -+ vhost->num_devices--; -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); + -+ vhba_device_put(vdev); + -+ schedule_work(&vhost->scan_devices); ++static void bce_vhci_event_queue_completion(struct bce_queue_sq *sq); ++ ++int __bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_sq_completion compl) ++{ ++ ret->vhci = vhci; ++ ++ ret->sq = bce_create_sq(vhci->dev, vhci->ev_cq, name, VHCI_EVENT_QUEUE_EL_COUNT, DMA_FROM_DEVICE, compl, ret); ++ if (!ret->sq) ++ return -EINVAL; ++ ret->data = dma_alloc_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ &ret->dma_addr, GFP_KERNEL); ++ if (!ret->data) { ++ bce_destroy_sq(vhci->dev, ret->sq); ++ ret->sq = NULL; ++ return -EINVAL; ++ } + ++ init_completion(&ret->queue_empty_completion); ++ bce_vhci_event_queue_submit_pending(ret, VHCI_EVENT_PENDING_COUNT); + return 0; +} + -+static struct vhba_device *vhba_lookup_device (int devnum) ++int bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_vhci_event_queue_callback cb) +{ -+ struct vhba_host *vhost; -+ struct vhba_device *vdev = NULL; -+ unsigned long flags; ++ ret->cb = cb; ++ return __bce_vhci_event_queue_create(vhci, ret, name, bce_vhci_event_queue_completion); ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); ++void bce_vhci_event_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_event_queue *q) ++{ ++ if (!q->sq) ++ return; ++ dma_free_coherent(&vhci->dev->pci->dev, sizeof(struct bce_vhci_message) * VHCI_EVENT_QUEUE_EL_COUNT, ++ q->data, 
q->dma_addr); ++ bce_destroy_sq(vhci->dev, q->sq); ++} + -+ if (likely(devnum < VHBA_MAX_DEVICES)) { -+ spin_lock_irqsave(&vhost->dev_lock, flags); -+ vdev = vhost->devices[devnum]; -+ if (vdev) { -+ vdev = vhba_device_get(vdev); ++static void bce_vhci_event_queue_completion(struct bce_queue_sq *sq) ++{ ++ struct bce_sq_completion_data *cd; ++ struct bce_vhci_event_queue *ev = sq->userdata; ++ struct bce_vhci_message *msg; ++ size_t cnt = 0; ++ ++ while ((cd = bce_next_completion(sq))) { ++ if (cd->status == BCE_COMPLETION_ABORTED) { /* We flushed the queue */ ++ bce_notify_submission_complete(sq); ++ continue; + } ++ msg = &ev->data[sq->head]; ++ pr_debug("bce-vhci: Got event: %x s=%x p1=%x p2=%llx\n", msg->cmd, msg->status, msg->param1, msg->param2); ++ ev->cb(ev, msg); + -+ spin_unlock_irqrestore(&vhost->dev_lock, flags); ++ bce_notify_submission_complete(sq); ++ ++cnt; + } ++ bce_vhci_event_queue_submit_pending(ev, cnt); ++ if (atomic_read(&sq->available_commands) == sq->el_count - 1) ++ complete(&ev->queue_empty_completion); ++} + -+ return vdev; ++void bce_vhci_event_queue_submit_pending(struct bce_vhci_event_queue *q, size_t count) ++{ ++ int idx; ++ struct bce_qe_submission *s; ++ while (count--) { ++ if (bce_reserve_submission(q->sq, NULL)) { ++ pr_err("bce-vhci: Failed to reserve an event queue submission\n"); ++ break; ++ } ++ idx = q->sq->tail; ++ s = bce_next_submission(q->sq); ++ bce_set_submission_single(s, ++ q->dma_addr + idx * sizeof(struct bce_vhci_message), sizeof(struct bce_vhci_message)); ++ } ++ bce_submit_to_device(q->sq); +} + -+static struct vhba_command *vhba_alloc_command (void) ++void bce_vhci_event_queue_pause(struct bce_vhci_event_queue *q) +{ -+ struct vhba_host *vhost; -+ struct vhba_command *vcmd; -+ unsigned long flags; -+ int i; ++ unsigned long timeout; ++ reinit_completion(&q->queue_empty_completion); ++ if (bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, q->sq->qid)) ++ pr_warn("bce-vhci: failed to flush event queue\n"); ++ timeout = msecs_to_jiffies(5000); ++ while (atomic_read(&q->sq->available_commands) != q->sq->el_count - 1) { ++ timeout = wait_for_completion_timeout(&q->queue_empty_completion, timeout); ++ if (timeout == 0) { ++ pr_err("bce-vhci: waiting for queue to be flushed timed out\n"); ++ break; ++ } ++ } ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); ++void bce_vhci_event_queue_resume(struct bce_vhci_event_queue *q) ++{ ++ if (atomic_read(&q->sq->available_commands) != q->sq->el_count - 1) { ++ pr_err("bce-vhci: resume of a queue with pending submissions\n"); ++ return; ++ } ++ bce_vhci_event_queue_submit_pending(q, VHCI_EVENT_PENDING_COUNT); ++} + -+ spin_lock_irqsave(&vhost->cmd_lock, flags); ++void bce_vhci_command_queue_create(struct bce_vhci_command_queue *ret, struct bce_vhci_message_queue *mq) ++{ ++ ret->mq = mq; ++ ret->completion.result = NULL; ++ init_completion(&ret->completion.completion); ++ spin_lock_init(&ret->completion_lock); ++ mutex_init(&ret->mutex); ++} + -+ vcmd = vhost->commands + vhost->cmd_next++; -+ if (vcmd->status != VHBA_REQ_FREE) { -+ for (i = 0; i < vhba_can_queue; i++) { -+ vcmd = vhost->commands + i; ++void bce_vhci_command_queue_destroy(struct bce_vhci_command_queue *cq) ++{ ++ spin_lock(&cq->completion_lock); ++ if (cq->completion.result) { ++ memset(cq->completion.result, 0, sizeof(struct bce_vhci_message)); ++ cq->completion.result->status = BCE_VHCI_ABORT; ++ complete(&cq->completion.completion); ++ cq->completion.result = NULL; ++ } ++ spin_unlock(&cq->completion_lock); ++ 
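++ /* Taking the mutex once more and releasing it immediately lets any
++  * command still inside __bce_vhci_command_queue_execute() finish
++  * before the mutex is destroyed. */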
mutex_lock(&cq->mutex); ++ mutex_unlock(&cq->mutex); ++ mutex_destroy(&cq->mutex); ++} + -+ if (vcmd->status == VHBA_REQ_FREE) { -+ vhost->cmd_next = i + 1; -+ break; -+ } -+ } ++void bce_vhci_command_queue_deliver_completion(struct bce_vhci_command_queue *cq, struct bce_vhci_message *msg) ++{ ++ struct bce_vhci_command_queue_completion *c = &cq->completion; + -+ if (i == vhba_can_queue) { -+ vcmd = NULL; ++ spin_lock(&cq->completion_lock); ++ if (c->result) { ++ *c->result = *msg; ++ complete(&c->completion); ++ c->result = NULL; ++ } ++ spin_unlock(&cq->completion_lock); ++} ++ ++static int __bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout) ++{ ++ int status; ++ struct bce_vhci_command_queue_completion *c; ++ struct bce_vhci_message creq; ++ c = &cq->completion; ++ ++ if ((status = bce_reserve_submission(cq->mq->sq, &timeout))) ++ return status; ++ ++ spin_lock(&cq->completion_lock); ++ c->result = res; ++ reinit_completion(&c->completion); ++ spin_unlock(&cq->completion_lock); ++ ++ bce_vhci_message_queue_write(cq->mq, req); ++ ++ if (!wait_for_completion_timeout(&c->completion, timeout)) { ++ /* we ran out of time, send cancellation */ ++ pr_debug("bce-vhci: command timed out req=%x\n", req->cmd); ++ if ((status = bce_reserve_submission(cq->mq->sq, &timeout))) ++ return status; ++ ++ creq = *req; ++ creq.cmd |= 0x4000; ++ bce_vhci_message_queue_write(cq->mq, &creq); ++ ++ if (!wait_for_completion_timeout(&c->completion, 1000)) { ++ pr_err("bce-vhci: Possible desync, cmd cancel timed out\n"); ++ ++ spin_lock(&cq->completion_lock); ++ c->result = NULL; ++ spin_unlock(&cq->completion_lock); ++ return -ETIMEDOUT; + } ++ if ((res->cmd & ~0x8000) == creq.cmd) ++ return -ETIMEDOUT; ++ /* reply for the previous command most likely arrived */ + } + -+ if (vcmd) { -+ vcmd->status = VHBA_REQ_PENDING; ++ if ((res->cmd & ~0x8000) != req->cmd) { ++ pr_err("bce-vhci: Possible desync, cmd reply mismatch req=%x, res=%x\n", req->cmd, res->cmd); ++ return -EIO; + } ++ if (res->status == BCE_VHCI_SUCCESS) ++ return 0; ++ return res->status; ++} + -+ vhost->cmd_next %= vhba_can_queue; ++int bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout) ++{ ++ int status; ++ mutex_lock(&cq->mutex); ++ status = __bce_vhci_command_queue_execute(cq, req, res, timeout); ++ mutex_unlock(&cq->mutex); ++ return status; ++} +diff --git a/drivers/staging/apple-bce/vhci/queue.h b/drivers/staging/apple-bce/vhci/queue.h +new file mode 100644 +index 000000000000..adb705b6ba1d +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/queue.h +@@ -0,0 +1,76 @@ ++#ifndef BCE_VHCI_QUEUE_H ++#define BCE_VHCI_QUEUE_H ++ ++#include ++#include "../queue.h" ++ ++#define VHCI_EVENT_QUEUE_EL_COUNT 256 ++#define VHCI_EVENT_PENDING_COUNT 32 ++ ++struct bce_vhci; ++struct bce_vhci_event_queue; ++ ++enum bce_vhci_message_status { ++ BCE_VHCI_SUCCESS = 1, ++ BCE_VHCI_ERROR = 2, ++ BCE_VHCI_USB_PIPE_STALL = 3, ++ BCE_VHCI_ABORT = 4, ++ BCE_VHCI_BAD_ARGUMENT = 5, ++ BCE_VHCI_OVERRUN = 6, ++ BCE_VHCI_INTERNAL_ERROR = 7, ++ BCE_VHCI_NO_POWER = 8, ++ BCE_VHCI_UNSUPPORTED = 9 ++}; ++struct bce_vhci_message { ++ u16 cmd; ++ u16 status; // bce_vhci_message_status ++ u32 param1; ++ u64 param2; ++}; + -+ spin_unlock_irqrestore(&vhost->cmd_lock, flags); ++struct bce_vhci_message_queue { ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq; ++ struct 
bce_vhci_message *data; ++ dma_addr_t dma_addr; ++}; ++typedef void (*bce_vhci_event_queue_callback)(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++struct bce_vhci_event_queue { ++ struct bce_vhci *vhci; ++ struct bce_queue_sq *sq; ++ struct bce_vhci_message *data; ++ dma_addr_t dma_addr; ++ bce_vhci_event_queue_callback cb; ++ struct completion queue_empty_completion; ++}; ++struct bce_vhci_command_queue_completion { ++ struct bce_vhci_message *result; ++ struct completion completion; ++}; ++struct bce_vhci_command_queue { ++ struct bce_vhci_message_queue *mq; ++ struct bce_vhci_command_queue_completion completion; ++ struct spinlock completion_lock; ++ struct mutex mutex; ++}; + -+ return vcmd; ++int bce_vhci_message_queue_create(struct bce_vhci *vhci, struct bce_vhci_message_queue *ret, const char *name); ++void bce_vhci_message_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_message_queue *q); ++void bce_vhci_message_queue_write(struct bce_vhci_message_queue *q, struct bce_vhci_message *req); ++ ++int __bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_sq_completion compl); ++int bce_vhci_event_queue_create(struct bce_vhci *vhci, struct bce_vhci_event_queue *ret, const char *name, ++ bce_vhci_event_queue_callback cb); ++void bce_vhci_event_queue_destroy(struct bce_vhci *vhci, struct bce_vhci_event_queue *q); ++void bce_vhci_event_queue_submit_pending(struct bce_vhci_event_queue *q, size_t count); ++void bce_vhci_event_queue_pause(struct bce_vhci_event_queue *q); ++void bce_vhci_event_queue_resume(struct bce_vhci_event_queue *q); ++ ++void bce_vhci_command_queue_create(struct bce_vhci_command_queue *ret, struct bce_vhci_message_queue *mq); ++void bce_vhci_command_queue_destroy(struct bce_vhci_command_queue *cq); ++int bce_vhci_command_queue_execute(struct bce_vhci_command_queue *cq, struct bce_vhci_message *req, ++ struct bce_vhci_message *res, unsigned long timeout); ++void bce_vhci_command_queue_deliver_completion(struct bce_vhci_command_queue *cq, struct bce_vhci_message *msg); ++ ++#endif //BCE_VHCI_QUEUE_H +diff --git a/drivers/staging/apple-bce/vhci/transfer.c b/drivers/staging/apple-bce/vhci/transfer.c +new file mode 100644 +index 000000000000..8226363d69c8 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/transfer.c +@@ -0,0 +1,661 @@ ++#include "transfer.h" ++#include "../queue.h" ++#include "vhci.h" ++#include "../apple_bce.h" ++#include ++ ++static void bce_vhci_transfer_queue_completion(struct bce_queue_sq *sq); ++static void bce_vhci_transfer_queue_giveback(struct bce_vhci_transfer_queue *q); ++static void bce_vhci_transfer_queue_remove_pending(struct bce_vhci_transfer_queue *q); ++ ++static int bce_vhci_urb_init(struct bce_vhci_urb *vurb); ++static int bce_vhci_urb_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg); ++static int bce_vhci_urb_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c); ++ ++static void bce_vhci_transfer_queue_reset_w(struct work_struct *work); ++ ++void bce_vhci_create_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q, ++ struct usb_host_endpoint *endp, bce_vhci_device_t dev_addr, enum dma_data_direction dir) ++{ ++ char name[0x21]; ++ INIT_LIST_HEAD(&q->evq); ++ INIT_LIST_HEAD(&q->giveback_urb_list); ++ spin_lock_init(&q->urb_lock); ++ mutex_init(&q->pause_lock); ++ q->vhci = vhci; ++ q->endp = endp; ++ q->dev_addr = dev_addr; ++ q->endp_addr = (u8) (endp->desc.bEndpointAddress & 0x8F); ++ q->state = 
BCE_VHCI_ENDPOINT_ACTIVE; ++ q->active = true; ++ q->stalled = false; ++ q->max_active_requests = 1; ++ if (usb_endpoint_type(&endp->desc) == USB_ENDPOINT_XFER_BULK) ++ q->max_active_requests = BCE_VHCI_BULK_MAX_ACTIVE_URBS; ++ q->remaining_active_requests = q->max_active_requests; ++ q->cq = bce_create_cq(vhci->dev, 0x100); ++ INIT_WORK(&q->w_reset, bce_vhci_transfer_queue_reset_w); ++ q->sq_in = NULL; ++ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { ++ snprintf(name, sizeof(name), "VHC1-%i-%02x", dev_addr, 0x80 | usb_endpoint_num(&endp->desc)); ++ q->sq_in = bce_create_sq(vhci->dev, q->cq, name, 0x100, DMA_FROM_DEVICE, ++ bce_vhci_transfer_queue_completion, q); ++ } ++ q->sq_out = NULL; ++ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { ++ snprintf(name, sizeof(name), "VHC1-%i-%02x", dev_addr, usb_endpoint_num(&endp->desc)); ++ q->sq_out = bce_create_sq(vhci->dev, q->cq, name, 0x100, DMA_TO_DEVICE, ++ bce_vhci_transfer_queue_completion, q); ++ } +} + -+static void vhba_free_command (struct vhba_command *vcmd) ++void bce_vhci_destroy_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q) +{ -+ struct vhba_host *vhost; -+ unsigned long flags; ++ bce_vhci_transfer_queue_giveback(q); ++ bce_vhci_transfer_queue_remove_pending(q); ++ if (q->sq_in) ++ bce_destroy_sq(vhci->dev, q->sq_in); ++ if (q->sq_out) ++ bce_destroy_sq(vhci->dev, q->sq_out); ++ bce_destroy_cq(vhci->dev, q->cq); ++} + -+ vhost = platform_get_drvdata(&vhba_platform_device); ++static inline bool bce_vhci_transfer_queue_can_init_urb(struct bce_vhci_transfer_queue *q) ++{ ++ return q->remaining_active_requests > 0; ++} + -+ spin_lock_irqsave(&vhost->cmd_lock, flags); -+ vcmd->status = VHBA_REQ_FREE; -+ spin_unlock_irqrestore(&vhost->cmd_lock, flags); ++static void bce_vhci_transfer_queue_defer_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg) ++{ ++ struct bce_vhci_list_message *lm; ++ lm = kmalloc(sizeof(struct bce_vhci_list_message), GFP_KERNEL); ++ INIT_LIST_HEAD(&lm->list); ++ lm->msg = *msg; ++ list_add_tail(&lm->list, &q->evq); +} + -+static int vhba_queuecommand (struct Scsi_Host *shost, struct scsi_cmnd *cmd) ++static void bce_vhci_transfer_queue_giveback(struct bce_vhci_transfer_queue *q) +{ -+ struct vhba_device *vdev; -+ int retval; -+ unsigned int devnum; ++ unsigned long flags; ++ struct urb *urb; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while (!list_empty(&q->giveback_urb_list)) { ++ urb = list_first_entry(&q->giveback_urb_list, struct urb, urb_list); ++ list_del(&urb->urb_list); ++ ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ usb_hcd_giveback_urb(q->vhci->hcd, urb, urb->status); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++} + -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0) -+ scmd_dbg(cmd, "queue %p tag %i\n", cmd, scsi_cmd_to_rq(cmd)->tag); -+#else -+ scmd_dbg(cmd, "queue %p tag %i\n", cmd, cmd->request->tag); -+#endif ++static void bce_vhci_transfer_queue_init_pending_urbs(struct bce_vhci_transfer_queue *q); + -+ devnum = bus_and_id_to_devnum(cmd->device->channel, cmd->device->id); -+ vdev = vhba_lookup_device(devnum); -+ if (!vdev) { -+ scmd_dbg(cmd, "no such device\n"); ++static void bce_vhci_transfer_queue_deliver_pending(struct bce_vhci_transfer_queue *q) ++{ ++ struct urb *urb; ++ struct bce_vhci_list_message *lm; + -+ cmd->result = DID_NO_CONNECT << 16; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) -+ scsi_done(cmd); -+#else -+ cmd->scsi_done(cmd); -+#endif ++ while 
(!list_empty(&q->endp->urb_list) && !list_empty(&q->evq)) { ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); + -+ return 0; ++ lm = list_first_entry(&q->evq, struct bce_vhci_list_message, list); ++ if (bce_vhci_urb_update(urb->hcpriv, &lm->msg) == -EAGAIN) ++ break; ++ list_del(&lm->list); ++ kfree(lm); + } + -+ retval = vhba_device_queue(vdev, cmd); -+ -+ vhba_device_put(vdev); -+ -+ return retval; ++ /* some of the URBs could have been completed, so initialize more URBs if possible */ ++ bce_vhci_transfer_queue_init_pending_urbs(q); +} + -+static int vhba_abort (struct scsi_cmnd *cmd) ++static void bce_vhci_transfer_queue_remove_pending(struct bce_vhci_transfer_queue *q) +{ -+ struct vhba_device *vdev; -+ int retval = SUCCESS; -+ unsigned int devnum; -+ -+ scmd_dbg(cmd, "abort %p\n", cmd); ++ unsigned long flags; ++ struct bce_vhci_list_message *lm; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while (!list_empty(&q->evq)) { ++ lm = list_first_entry(&q->evq, struct bce_vhci_list_message, list); ++ list_del(&lm->list); ++ kfree(lm); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++} + -+ devnum = bus_and_id_to_devnum(cmd->device->channel, cmd->device->id); -+ vdev = vhba_lookup_device(devnum); -+ if (vdev) { -+ retval = vhba_device_dequeue(vdev, cmd); -+ vhba_device_put(vdev); ++void bce_vhci_transfer_queue_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg) ++{ ++ unsigned long flags; ++ struct bce_vhci_urb *turb; ++ struct urb *urb; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_deliver_pending(q); ++ ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && ++ (!list_empty(&q->evq) || list_empty(&q->endp->urb_list))) { ++ bce_vhci_transfer_queue_defer_event(q, msg); ++ goto complete; ++ } ++ if (list_empty(&q->endp->urb_list)) { ++ pr_err("bce-vhci: [%02x] Unexpected transfer queue event\n", q->endp_addr); ++ goto complete; ++ } ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); ++ turb = urb->hcpriv; ++ if (bce_vhci_urb_update(turb, msg) == -EAGAIN) { ++ bce_vhci_transfer_queue_defer_event(q, msg); + } else { -+ cmd->result = DID_NO_CONNECT << 16; ++ bce_vhci_transfer_queue_init_pending_urbs(q); + } + -+ return retval; ++complete: ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_giveback(q); +} + -+static struct scsi_host_template vhba_template = { -+ .module = THIS_MODULE, -+ .name = "vhba", -+ .proc_name = "vhba", -+ .queuecommand = vhba_queuecommand, -+ .eh_abort_handler = vhba_abort, -+ .this_id = -1, -+ .max_sectors = VHBA_MAX_SECTORS_PER_IO, -+ .sg_tablesize = 256, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) -+ .slave_alloc = vhba_slave_alloc, -+#endif -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) -+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, -+#endif -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ .use_blk_tags = 1, -+#endif -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0) -+ .max_segment_size = VHBA_KBUF_SIZE, -+#endif -+}; ++static void bce_vhci_transfer_queue_completion(struct bce_queue_sq *sq) ++{ ++ unsigned long flags; ++ struct bce_sq_completion_data *c; ++ struct urb *urb; ++ struct bce_vhci_transfer_queue *q = sq->userdata; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ while ((c = bce_next_completion(sq))) { ++ if (c->status == BCE_COMPLETION_ABORTED) { /* We flushed the queue */ ++ pr_debug("bce-vhci: [%02x] Got an abort completion\n", q->endp_addr); ++ bce_notify_submission_complete(sq); ++ 
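++ /* Flushed submissions are only acknowledged here; URB state is left
++  * untouched for the reset/cancel path. */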
continue; ++ } ++ if (list_empty(&q->endp->urb_list)) { ++ pr_err("bce-vhci: [%02x] Got a completion while no requests are pending\n", q->endp_addr); ++ continue; ++ } ++ pr_debug("bce-vhci: [%02x] Got a transfer queue completion\n", q->endp_addr); ++ urb = list_first_entry(&q->endp->urb_list, struct urb, urb_list); ++ bce_vhci_urb_transfer_completion(urb->hcpriv, c); ++ bce_notify_submission_complete(sq); ++ } ++ bce_vhci_transfer_queue_deliver_pending(q); ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_giveback(q); ++} + -+static ssize_t do_request (struct vhba_device *vdev, unsigned long metatag, struct scsi_cmnd *cmd, char __user *buf, size_t buf_len) ++int bce_vhci_transfer_queue_do_pause(struct bce_vhci_transfer_queue *q) +{ -+ struct vhba_request vreq; -+ ssize_t ret; ++ unsigned long flags; ++ int status; ++ u8 endp_addr = (u8) (q->endp->desc.bEndpointAddress & 0x8F); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->active = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ if (q->sq_out) { ++ pr_err("bce-vhci: Not implemented: wait for pending output requests\n"); ++ } ++ bce_vhci_transfer_queue_remove_pending(q); ++ if ((status = bce_vhci_cmd_endpoint_set_state( ++ &q->vhci->cq, q->dev_addr, endp_addr, BCE_VHCI_ENDPOINT_PAUSED, &q->state))) ++ return status; ++ if (q->state != BCE_VHCI_ENDPOINT_PAUSED) ++ return -EINVAL; ++ if (q->sq_in) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_in->qid); ++ if (q->sq_out) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_out->qid); ++ return 0; ++} + -+ scmd_dbg(cmd, "request %lu (%p), cdb 0x%x, bufflen %d, sg count %d\n", -+ metatag, cmd, cmd->cmnd[0], scsi_bufflen(cmd), scsi_sg_count(cmd)); ++static void bce_vhci_urb_resume(struct bce_vhci_urb *urb); + -+ ret = sizeof(vreq); -+ if (DATA_TO_DEVICE(cmd->sc_data_direction)) { -+ ret += scsi_bufflen(cmd); ++int bce_vhci_transfer_queue_do_resume(struct bce_vhci_transfer_queue *q) ++{ ++ unsigned long flags; ++ int status; ++ struct urb *urb, *urbt; ++ struct bce_vhci_urb *vurb; ++ u8 endp_addr = (u8) (q->endp->desc.bEndpointAddress & 0x8F); ++ if ((status = bce_vhci_cmd_endpoint_set_state( ++ &q->vhci->cq, q->dev_addr, endp_addr, BCE_VHCI_ENDPOINT_ACTIVE, &q->state))) ++ return status; ++ if (q->state != BCE_VHCI_ENDPOINT_ACTIVE) ++ return -EINVAL; ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->active = true; ++ list_for_each_entry_safe(urb, urbt, &q->endp->urb_list, urb_list) { ++ vurb = urb->hcpriv; ++ if (vurb->state == BCE_VHCI_URB_INIT_PENDING) { ++ if (!bce_vhci_transfer_queue_can_init_urb(q)) ++ break; ++ bce_vhci_urb_init(vurb); ++ } else { ++ bce_vhci_urb_resume(vurb); ++ } + } ++ bce_vhci_transfer_queue_deliver_pending(q); ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ return 0; ++} + -+ if (ret > buf_len) { -+ scmd_dbg(cmd, "buffer too small (%zd < %zd) for a request\n", buf_len, ret); -+ return -EIO; ++int bce_vhci_transfer_queue_pause(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src) ++{ ++ int ret = 0; ++ mutex_lock(&q->pause_lock); ++ if ((q->paused_by & src) != src) { ++ if (!q->paused_by) ++ ret = bce_vhci_transfer_queue_do_pause(q); ++ if (!ret) ++ q->paused_by |= src; + } ++ mutex_unlock(&q->pause_lock); ++ return ret; ++} + -+ vreq.metatag = metatag; -+ vreq.lun = cmd->device->lun; -+ memcpy(vreq.cdb, cmd->cmnd, MAX_COMMAND_SIZE); -+ vreq.cdb_len = cmd->cmd_len; -+ vreq.data_len = scsi_bufflen(cmd); ++int bce_vhci_transfer_queue_resume(struct bce_vhci_transfer_queue *q, enum 
bce_vhci_pause_source src) ++{ ++ int ret = 0; ++ mutex_lock(&q->pause_lock); ++ if (q->paused_by & src) { ++ if (!(q->paused_by & ~src)) ++ ret = bce_vhci_transfer_queue_do_resume(q); ++ if (!ret) ++ q->paused_by &= ~src; ++ } ++ mutex_unlock(&q->pause_lock); ++ return ret; ++} + -+ if (copy_to_user(buf, &vreq, sizeof(vreq))) { -+ return -EFAULT; ++static void bce_vhci_transfer_queue_reset_w(struct work_struct *work) ++{ ++ unsigned long flags; ++ struct bce_vhci_transfer_queue *q = container_of(work, struct bce_vhci_transfer_queue, w_reset); ++ ++ mutex_lock(&q->pause_lock); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ if (!q->stalled) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ mutex_unlock(&q->pause_lock); ++ return; ++ } ++ q->active = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ q->paused_by |= BCE_VHCI_PAUSE_INTERNAL_WQ; ++ bce_vhci_transfer_queue_remove_pending(q); ++ if (q->sq_in) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_in->qid); ++ if (q->sq_out) ++ bce_cmd_flush_memory_queue(q->vhci->dev->cmd_cmdq, (u16) q->sq_out->qid); ++ bce_vhci_cmd_endpoint_reset(&q->vhci->cq, q->dev_addr, (u8) (q->endp->desc.bEndpointAddress & 0x8F)); ++ spin_lock_irqsave(&q->urb_lock, flags); ++ q->stalled = false; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ mutex_unlock(&q->pause_lock); ++ bce_vhci_transfer_queue_resume(q, BCE_VHCI_PAUSE_INTERNAL_WQ); ++} ++ ++void bce_vhci_transfer_queue_request_reset(struct bce_vhci_transfer_queue *q) ++{ ++ queue_work(q->vhci->tq_state_wq, &q->w_reset); ++} ++ ++static void bce_vhci_transfer_queue_init_pending_urbs(struct bce_vhci_transfer_queue *q) ++{ ++ struct urb *urb, *urbt; ++ struct bce_vhci_urb *vurb; ++ list_for_each_entry_safe(urb, urbt, &q->endp->urb_list, urb_list) { ++ vurb = urb->hcpriv; ++ if (!bce_vhci_transfer_queue_can_init_urb(q)) ++ break; ++ if (vurb->state == BCE_VHCI_URB_INIT_PENDING) ++ bce_vhci_urb_init(vurb); + } ++} + -+ if (DATA_TO_DEVICE(cmd->sc_data_direction) && vreq.data_len) { -+ buf += sizeof(vreq); + -+ if (scsi_sg_count(cmd)) { -+ unsigned char *kaddr, *uaddr; -+ struct scatterlist *sglist = scsi_sglist(cmd); -+ struct scatterlist *sg; -+ int i; + -+ uaddr = (unsigned char *) buf; ++static int bce_vhci_urb_data_start(struct bce_vhci_urb *urb, unsigned long *timeout); + -+ for_each_sg(sglist, sg, scsi_sg_count(cmd), i) { -+ size_t len = sg->length; ++int bce_vhci_urb_create(struct bce_vhci_transfer_queue *q, struct urb *urb) ++{ ++ unsigned long flags; ++ int status = 0; ++ struct bce_vhci_urb *vurb; ++ vurb = kzalloc(sizeof(struct bce_vhci_urb), GFP_KERNEL); ++ urb->hcpriv = vurb; ++ ++ vurb->q = q; ++ vurb->urb = urb; ++ vurb->dir = usb_urb_dir_in(urb) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ vurb->is_control = (usb_endpoint_num(&urb->ep->desc) == 0); ++ ++ spin_lock_irqsave(&q->urb_lock, flags); ++ status = usb_hcd_link_urb_to_ep(q->vhci->hcd, urb); ++ if (status) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ urb->hcpriv = NULL; ++ kfree(vurb); ++ return status; ++ } + -+ if (len > vdev->kbuf_size) { -+ scmd_dbg(cmd, "segment size (%zu) exceeds kbuf size (%zu)!", len, vdev->kbuf_size); -+ len = vdev->kbuf_size; -+ } ++ if (q->active) { ++ if (bce_vhci_transfer_queue_can_init_urb(vurb->q)) ++ status = bce_vhci_urb_init(vurb); ++ else ++ vurb->state = BCE_VHCI_URB_INIT_PENDING; ++ } else { ++ if (q->stalled) ++ bce_vhci_transfer_queue_request_reset(q); ++ vurb->state = BCE_VHCI_URB_INIT_PENDING; ++ } ++ if (status) { ++ usb_hcd_unlink_urb_from_ep(q->vhci->hcd, urb); ++ urb->hcpriv = NULL; ++ kfree(vurb); ++ } else { ++ bce_vhci_transfer_queue_deliver_pending(q); ++ } ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ pr_debug("bce-vhci: [%02x] URB enqueued (dir = %s, size = %i)\n", q->endp_addr, ++ usb_urb_dir_in(urb) ? "IN" : "OUT", urb->transfer_buffer_length); ++ return status; ++} + -+ kaddr = kmap_atomic(sg_page(sg)); -+ memcpy(vdev->kbuf, kaddr + sg->offset, len); -+ kunmap_atomic(kaddr); ++static int bce_vhci_urb_init(struct bce_vhci_urb *vurb) ++{ ++ int status = 0; + -+ if (copy_to_user(uaddr, vdev->kbuf, len)) { -+ return -EFAULT; -+ } -+ uaddr += len; -+ } -+ } else { -+ if (copy_to_user(buf, scsi_sglist(cmd), vreq.data_len)) { -+ return -EFAULT; -+ } -+ } ++ if (vurb->q->remaining_active_requests == 0) { ++ pr_err("bce-vhci: cannot init request (remaining_active_requests = 0)\n"); ++ return -EINVAL; + } + -+ return ret; ++ if (vurb->is_control) { ++ vurb->state = BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST; ++ } else { ++ status = bce_vhci_urb_data_start(vurb, NULL); ++ } ++ ++ if (!status) { ++ --vurb->q->remaining_active_requests; ++ } ++ return status; +} + -+static ssize_t do_response (struct vhba_device *vdev, unsigned long metatag, struct scsi_cmnd *cmd, const char __user *buf, size_t buf_len, struct vhba_response *res) ++static void bce_vhci_urb_complete(struct bce_vhci_urb *urb, int status) +{ -+ ssize_t ret = 0; ++ struct bce_vhci_transfer_queue *q = urb->q; ++ struct bce_vhci *vhci = q->vhci; ++ struct urb *real_urb = urb->urb; ++ pr_debug("bce-vhci: [%02x] URB complete %i\n", q->endp_addr, status); ++ usb_hcd_unlink_urb_from_ep(vhci->hcd, real_urb); ++ real_urb->hcpriv = NULL; ++ real_urb->status = status; ++ if (urb->state != BCE_VHCI_URB_INIT_PENDING) ++ ++urb->q->remaining_active_requests; ++ kfree(urb); ++ list_add_tail(&real_urb->urb_list, &q->giveback_urb_list); ++} + -+ scmd_dbg(cmd, "response %lu (%p), status %x, data len %d, sg count %d\n", -+ metatag, cmd, res->status, res->data_len, scsi_sg_count(cmd)); ++int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *urb, int status) ++{ ++ struct bce_vhci_urb *vurb; ++ unsigned long flags; ++ int ret; + -+ if (res->status) { -+ if (res->data_len > SCSI_SENSE_BUFFERSIZE) { -+ scmd_dbg(cmd, "truncate sense (%d < %d)", SCSI_SENSE_BUFFERSIZE, res->data_len); -+ res->data_len = SCSI_SENSE_BUFFERSIZE; -+ } ++ spin_lock_irqsave(&q->urb_lock, flags); ++ if ((ret = usb_hcd_check_unlink_urb(q->vhci->hcd, urb, status))) { ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ return ret; ++ } + -+ if (copy_from_user(cmd->sense_buffer, buf, res->data_len)) { -+ return -EFAULT; -+ } ++ vurb = urb->hcpriv; ++ /* If the URB wasn't posted to the device 
yet, we can still remove it on the host without pausing the queue. */ ++ if (vurb->state != BCE_VHCI_URB_INIT_PENDING) { ++ pr_debug("bce-vhci: [%02x] Cancelling URB\n", q->endp_addr); + -+ cmd->result = res->status; ++ spin_unlock_irqrestore(&q->urb_lock, flags); ++ bce_vhci_transfer_queue_pause(q, BCE_VHCI_PAUSE_INTERNAL_WQ); ++ spin_lock_irqsave(&q->urb_lock, flags); + -+ ret += res->data_len; -+ } else if (DATA_FROM_DEVICE(cmd->sc_data_direction) && scsi_bufflen(cmd)) { -+ size_t to_read; ++ ++q->remaining_active_requests; ++ } + -+ if (res->data_len > scsi_bufflen(cmd)) { -+ scmd_dbg(cmd, "truncate data (%d < %d)\n", scsi_bufflen(cmd), res->data_len); -+ res->data_len = scsi_bufflen(cmd); -+ } ++ usb_hcd_unlink_urb_from_ep(q->vhci->hcd, urb); + -+ to_read = res->data_len; ++ spin_unlock_irqrestore(&q->urb_lock, flags); + -+ if (scsi_sg_count(cmd)) { -+ unsigned char *kaddr, *uaddr; -+ struct scatterlist *sglist = scsi_sglist(cmd); -+ struct scatterlist *sg; -+ int i; ++ usb_hcd_giveback_urb(q->vhci->hcd, urb, status); + -+ uaddr = (unsigned char *)buf; ++ if (vurb->state != BCE_VHCI_URB_INIT_PENDING) ++ bce_vhci_transfer_queue_resume(q, BCE_VHCI_PAUSE_INTERNAL_WQ); + -+ for_each_sg(sglist, sg, scsi_sg_count(cmd), i) { -+ size_t len = (sg->length < to_read) ? sg->length : to_read; ++ kfree(vurb); + -+ if (len > vdev->kbuf_size) { -+ scmd_dbg(cmd, "segment size (%zu) exceeds kbuf size (%zu)!", len, vdev->kbuf_size); -+ len = vdev->kbuf_size; -+ } ++ return 0; ++} + -+ if (copy_from_user(vdev->kbuf, uaddr, len)) { -+ return -EFAULT; -+ } -+ uaddr += len; ++static int bce_vhci_urb_data_transfer_in(struct bce_vhci_urb *urb, unsigned long *timeout) ++{ ++ struct bce_vhci_message msg; ++ struct bce_qe_submission *s; ++ u32 tr_len; ++ int reservation1, reservation2 = -EFAULT; + -+ kaddr = kmap_atomic(sg_page(sg)); -+ memcpy(kaddr + sg->offset, vdev->kbuf, len); -+ kunmap_atomic(kaddr); ++ pr_debug("bce-vhci: [%02x] DMA from device %llx %x\n", urb->q->endp_addr, ++ (u64) urb->urb->transfer_dma, urb->urb->transfer_buffer_length); + -+ to_read -= len; -+ if (to_read == 0) { -+ break; -+ } -+ } -+ } else { -+ if (copy_from_user(scsi_sglist(cmd), buf, res->data_len)) { -+ return -EFAULT; -+ } ++ /* Reserve both a message and a submission, so we don't run into issues later. 
*/ ++ reservation1 = bce_reserve_submission(urb->q->vhci->msg_asynchronous.sq, timeout); ++ if (!reservation1) ++ reservation2 = bce_reserve_submission(urb->q->sq_in, timeout); ++ if (reservation1 || reservation2) { ++ pr_err("bce-vhci: Failed to reserve a submission for URB data transfer\n"); ++ if (!reservation1) ++ bce_cancel_submission_reservation(urb->q->vhci->msg_asynchronous.sq); ++ return -ENOMEM; ++ } + -+ to_read -= res->data_len; -+ } ++ urb->send_offset = urb->receive_offset; + -+ scsi_set_resid(cmd, to_read); ++ tr_len = urb->urb->transfer_buffer_length - urb->send_offset; + -+ ret += res->data_len - to_read; -+ } ++ spin_lock(&urb->q->vhci->msg_asynchronous_lock); ++ msg.cmd = BCE_VHCI_CMD_TRANSFER_REQUEST; ++ msg.status = 0; ++ msg.param1 = ((urb->urb->ep->desc.bEndpointAddress & 0x8Fu) << 8) | urb->q->dev_addr; ++ msg.param2 = tr_len; ++ bce_vhci_message_queue_write(&urb->q->vhci->msg_asynchronous, &msg); ++ spin_unlock(&urb->q->vhci->msg_asynchronous_lock); + -+ return ret; ++ s = bce_next_submission(urb->q->sq_in); ++ bce_set_submission_single(s, urb->urb->transfer_dma + urb->send_offset, tr_len); ++ bce_submit_to_device(urb->q->sq_in); ++ ++ urb->state = BCE_VHCI_URB_WAITING_FOR_COMPLETION; ++ return 0; +} + -+static struct vhba_command *next_command (struct vhba_device *vdev) ++static int bce_vhci_urb_data_start(struct bce_vhci_urb *urb, unsigned long *timeout) +{ -+ struct vhba_command *vcmd; -+ -+ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { -+ if (vcmd->status == VHBA_REQ_PENDING) { -+ break; -+ } ++ if (urb->dir == DMA_TO_DEVICE) { ++ if (urb->urb->transfer_buffer_length > 0) ++ urb->state = BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST; ++ else ++ urb->state = BCE_VHCI_URB_DATA_TRANSFER_COMPLETE; ++ return 0; ++ } else { ++ return bce_vhci_urb_data_transfer_in(urb, timeout); + } ++} + -+ if (&vcmd->entry == &vdev->cmd_list) { -+ vcmd = NULL; ++static int bce_vhci_urb_send_out_data(struct bce_vhci_urb *urb, dma_addr_t addr, size_t size) ++{ ++ struct bce_qe_submission *s; ++ unsigned long timeout = 0; ++ if (bce_reserve_submission(urb->q->sq_out, &timeout)) { ++ pr_err("bce-vhci: Failed to reserve a submission for URB data transfer\n"); ++ return -EPIPE; + } + -+ return vcmd; ++ pr_debug("bce-vhci: [%02x] DMA to device %llx %lx\n", urb->q->endp_addr, (u64) addr, size); ++ ++ s = bce_next_submission(urb->q->sq_out); ++ bce_set_submission_single(s, addr, size); ++ bce_submit_to_device(urb->q->sq_out); ++ return 0; +} + -+static struct vhba_command *match_command (struct vhba_device *vdev, __u32 metatag) ++static int bce_vhci_urb_data_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg) +{ -+ struct vhba_command *vcmd; -+ -+ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { -+ if (vcmd->metatag == metatag) { -+ break; ++ u32 tr_len; ++ int status; ++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST) { ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST) { ++ tr_len = min(urb->urb->transfer_buffer_length - urb->send_offset, (u32) msg->param2); ++ if ((status = bce_vhci_urb_send_out_data(urb, urb->urb->transfer_dma + urb->send_offset, tr_len))) ++ return status; ++ urb->send_offset += tr_len; ++ urb->state = BCE_VHCI_URB_WAITING_FOR_COMPLETION; ++ return 0; + } + } + -+ if (&vcmd->entry == &vdev->cmd_list) { -+ vcmd = NULL; ++ /* 0x1000 in out queues aren't really unexpected */ ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && urb->q->sq_out != NULL) ++ return -EAGAIN; ++ pr_err("bce-vhci: [%02x] %s URB unexpected message (state = %x, msg: %x %x 
%x %llx)\n", ++ urb->q->endp_addr, (urb->is_control ? "Control (data update)" : "Data"), urb->state, ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ return -EAGAIN; ++} ++ ++static int bce_vhci_urb_data_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c) ++{ ++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) { ++ urb->receive_offset += c->data_size; ++ if (urb->dir == DMA_FROM_DEVICE || urb->receive_offset >= urb->urb->transfer_buffer_length) { ++ urb->urb->actual_length = (u32) urb->receive_offset; ++ urb->state = BCE_VHCI_URB_DATA_TRANSFER_COMPLETE; ++ if (!urb->is_control) { ++ bce_vhci_urb_complete(urb, 0); ++ return -ENOENT; ++ } ++ } ++ } else { ++ pr_err("bce-vhci: [%02x] Data URB unexpected completion\n", urb->q->endp_addr); + } -+ -+ return vcmd; ++ return 0; +} + -+static struct vhba_command *wait_command (struct vhba_device *vdev, unsigned long flags) -+{ -+ struct vhba_command *vcmd; -+ DEFINE_WAIT(wait); + -+ while (!(vcmd = next_command(vdev))) { -+ if (signal_pending(current)) { -+ break; ++static int bce_vhci_urb_control_check_status(struct bce_vhci_urb *urb) ++{ ++ struct bce_vhci_transfer_queue *q = urb->q; ++ if (urb->received_status == 0) ++ return 0; ++ if (urb->state == BCE_VHCI_URB_DATA_TRANSFER_COMPLETE || ++ (urb->received_status != BCE_VHCI_SUCCESS && urb->state != BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST && ++ urb->state != BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION)) { ++ urb->state = BCE_VHCI_URB_CONTROL_COMPLETE; ++ if (urb->received_status != BCE_VHCI_SUCCESS) { ++ pr_err("bce-vhci: [%02x] URB failed: %x\n", urb->q->endp_addr, urb->received_status); ++ urb->q->active = false; ++ urb->q->stalled = true; ++ bce_vhci_urb_complete(urb, -EPIPE); ++ if (!list_empty(&q->endp->urb_list)) ++ bce_vhci_transfer_queue_request_reset(q); ++ return -ENOENT; + } ++ bce_vhci_urb_complete(urb, 0); ++ return -ENOENT; ++ } ++ return 0; ++} + -+ prepare_to_wait(&vdev->cmd_wq, &wait, TASK_INTERRUPTIBLE); -+ -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); -+ -+ schedule(); ++static int bce_vhci_urb_control_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg) ++{ ++ int status; ++ if (msg->cmd == BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS) { ++ urb->received_status = msg->status; ++ return bce_vhci_urb_control_check_status(urb); ++ } + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); ++ if (urb->state == BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST) { ++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST) { ++ if (bce_vhci_urb_send_out_data(urb, urb->urb->setup_dma, sizeof(struct usb_ctrlrequest))) { ++ pr_err("bce-vhci: [%02x] Failed to start URB setup transfer\n", urb->q->endp_addr); ++ return 0; /* TODO: fail the URB? 
*/
++ }
++ urb->state = BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION;
++ pr_debug("bce-vhci: [%02x] Sent setup %llx\n", urb->q->endp_addr, urb->urb->setup_dma);
++ return 0;
++ }
++ } else if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST ||
++ urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) {
++ if ((status = bce_vhci_urb_data_update(urb, msg)))
++ return status;
++ return bce_vhci_urb_control_check_status(urb);
++ }
++
++ /* 0x1000 in out queues aren't really unexpected */
++ if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST && urb->q->sq_out != NULL)
++ return -EAGAIN;
++ pr_err("bce-vhci: [%02x] Control URB unexpected message (state = %x, msg: %x %x %x %llx)\n", urb->q->endp_addr,
++ urb->state, msg->cmd, msg->status, msg->param1, msg->param2);
++ return -EAGAIN;
++}
++
++static int bce_vhci_urb_control_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c)
++{
++ int status;
++ unsigned long timeout;
++
++ if (urb->state == BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION) {
++ if (c->data_size != sizeof(struct usb_ctrlrequest))
++ pr_err("bce-vhci: [%02x] transfer complete data size mismatch for usb_ctrlrequest (%llx instead of %lx)\n",
++ urb->q->endp_addr, c->data_size, sizeof(struct usb_ctrlrequest));
++
++ timeout = 1000;
++ status = bce_vhci_urb_data_start(urb, &timeout);
++ if (status) {
++ bce_vhci_urb_complete(urb, status);
++ return -ENOENT;
++ }
++ return 0;
++ } else if (urb->state == BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST ||
++ urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) {
++ if ((status = bce_vhci_urb_data_transfer_completion(urb, c)))
++ return status;
++ return bce_vhci_urb_control_check_status(urb);
++ } else {
++ pr_err("bce-vhci: [%02x] Control URB unexpected completion (state = %x)\n", urb->q->endp_addr, urb->state);
++ }
++ return 0;
++}
++
++static int bce_vhci_urb_update(struct bce_vhci_urb *urb, struct bce_vhci_message *msg)
++{
++ if (urb->state == BCE_VHCI_URB_INIT_PENDING)
++ return -EAGAIN;
++ if (urb->is_control)
++ return bce_vhci_urb_control_update(urb, msg);
++ else
++ return bce_vhci_urb_data_update(urb, msg);
++}
++
++static int bce_vhci_urb_transfer_completion(struct bce_vhci_urb *urb, struct bce_sq_completion_data *c)
++{
++ if (urb->is_control)
++ return bce_vhci_urb_control_transfer_completion(urb, c);
++ else
++ return bce_vhci_urb_data_transfer_completion(urb, c);
++}
++
++static void bce_vhci_urb_resume(struct bce_vhci_urb *urb)
++{
++ int status = 0;
++ if (urb->state == BCE_VHCI_URB_WAITING_FOR_COMPLETION) {
++ status = bce_vhci_urb_data_transfer_in(urb, NULL);
++ }
++ if (status)
++ bce_vhci_urb_complete(urb, status);
++}
+diff --git a/drivers/staging/apple-bce/vhci/transfer.h b/drivers/staging/apple-bce/vhci/transfer.h
+new file mode 100644
+index 000000000000..89ecad6bcf8f
+--- /dev/null
++++ b/drivers/staging/apple-bce/vhci/transfer.h
+@@ -0,0 +1,73 @@
++#ifndef BCEDRIVER_TRANSFER_H
++#define BCEDRIVER_TRANSFER_H
+
++#include
++#include "queue.h"
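++/* Rough layering, as this header suggests: a bce_vhci_transfer_queue drives
++ * per-endpoint BCE submission queues (../queue.h) using the firmware
++ * message protocol from queue.h and command.h. */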
++#include "command.h" ++#include "../queue.h" + -+ if (!vcmd) { -+ return -EWOULDBLOCK; -+ } -+ } else { -+ /* Blocking variant */ -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ vcmd = wait_command(vdev, flags); -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++struct bce_vhci_list_message { ++ struct list_head list; ++ struct bce_vhci_message msg; ++}; ++enum bce_vhci_pause_source { ++ BCE_VHCI_PAUSE_INTERNAL_WQ = 1, ++ BCE_VHCI_PAUSE_FIRMWARE = 2, ++ BCE_VHCI_PAUSE_SUSPEND = 4, ++ BCE_VHCI_PAUSE_SHUTDOWN = 8 ++}; ++struct bce_vhci_transfer_queue { ++ struct bce_vhci *vhci; ++ struct usb_host_endpoint *endp; ++ enum bce_vhci_endpoint_state state; ++ u32 max_active_requests, remaining_active_requests; ++ bool active, stalled; ++ u32 paused_by; ++ bce_vhci_device_t dev_addr; ++ u8 endp_addr; ++ struct bce_queue_cq *cq; ++ struct bce_queue_sq *sq_in; ++ struct bce_queue_sq *sq_out; ++ struct list_head evq; ++ struct spinlock urb_lock; ++ struct mutex pause_lock; ++ struct list_head giveback_urb_list; ++ ++ struct work_struct w_reset; ++}; ++enum bce_vhci_urb_state { ++ BCE_VHCI_URB_INIT_PENDING, + -+ if (!vcmd) { -+ return -ERESTARTSYS; -+ } -+ } ++ BCE_VHCI_URB_WAITING_FOR_TRANSFER_REQUEST, ++ BCE_VHCI_URB_WAITING_FOR_COMPLETION, ++ BCE_VHCI_URB_DATA_TRANSFER_COMPLETE, + -+ ret = do_request(vdev, vcmd->metatag, vcmd->cmd, buf, buf_len); ++ BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_REQUEST, ++ BCE_VHCI_URB_CONTROL_WAITING_FOR_SETUP_COMPLETION, ++ BCE_VHCI_URB_CONTROL_COMPLETE ++}; ++struct bce_vhci_urb { ++ struct urb *urb; ++ struct bce_vhci_transfer_queue *q; ++ enum dma_data_direction dir; ++ bool is_control; ++ enum bce_vhci_urb_state state; ++ int received_status; ++ u32 send_offset; ++ u32 receive_offset; ++}; + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ if (ret >= 0) { -+ vcmd->status = VHBA_REQ_SENT; -+ *offset += ret; -+ } else { -+ vcmd->status = VHBA_REQ_PENDING; -+ } ++void bce_vhci_create_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q, ++ struct usb_host_endpoint *endp, bce_vhci_device_t dev_addr, enum dma_data_direction dir); ++void bce_vhci_destroy_transfer_queue(struct bce_vhci *vhci, struct bce_vhci_transfer_queue *q); ++void bce_vhci_transfer_queue_event(struct bce_vhci_transfer_queue *q, struct bce_vhci_message *msg); ++int bce_vhci_transfer_queue_do_pause(struct bce_vhci_transfer_queue *q); ++int bce_vhci_transfer_queue_do_resume(struct bce_vhci_transfer_queue *q); ++int bce_vhci_transfer_queue_pause(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src); ++int bce_vhci_transfer_queue_resume(struct bce_vhci_transfer_queue *q, enum bce_vhci_pause_source src); ++void bce_vhci_transfer_queue_request_reset(struct bce_vhci_transfer_queue *q); ++ ++int bce_vhci_urb_create(struct bce_vhci_transfer_queue *q, struct urb *urb); ++int bce_vhci_urb_request_cancel(struct bce_vhci_transfer_queue *q, struct urb *urb, int status); ++ ++#endif //BCEDRIVER_TRANSFER_H +diff --git a/drivers/staging/apple-bce/vhci/vhci.c b/drivers/staging/apple-bce/vhci/vhci.c +new file mode 100644 +index 000000000000..eb26f55000d8 +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/vhci.c +@@ -0,0 +1,759 @@ ++#include "vhci.h" ++#include "../apple_bce.h" ++#include "command.h" ++#include ++#include ++#include ++#include + -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++static dev_t bce_vhci_chrdev; ++static struct class *bce_vhci_class; ++static const struct hc_driver bce_vhci_driver; ++static u16 bce_vhci_port_mask = U16_MAX; + -+ return ret; -+} ++static 
int bce_vhci_create_event_queues(struct bce_vhci *vhci); ++static void bce_vhci_destroy_event_queues(struct bce_vhci *vhci); ++static int bce_vhci_create_message_queues(struct bce_vhci *vhci); ++static void bce_vhci_destroy_message_queues(struct bce_vhci *vhci); ++static void bce_vhci_handle_firmware_events_w(struct work_struct *ws); ++static void bce_vhci_firmware_event_completion(struct bce_queue_sq *sq); + -+static ssize_t vhba_ctl_write (struct file *file, const char __user *buf, size_t buf_len, loff_t *offset) ++int bce_vhci_create(struct apple_bce_device *dev, struct bce_vhci *vhci) +{ -+ struct vhba_device *vdev; -+ struct vhba_command *vcmd; -+ struct vhba_response res; -+ ssize_t ret; -+ unsigned long flags; ++ int status; + -+ if (buf_len < sizeof(res)) { -+ return -EIO; -+ } ++ spin_lock_init(&vhci->hcd_spinlock); + -+ if (copy_from_user(&res, buf, sizeof(res))) { -+ return -EFAULT; ++ vhci->dev = dev; ++ ++ vhci->vdevt = bce_vhci_chrdev; ++ vhci->vdev = device_create(bce_vhci_class, dev->dev, vhci->vdevt, NULL, "bce-vhci"); ++ if (IS_ERR_OR_NULL(vhci->vdev)) { ++ status = PTR_ERR(vhci->vdev); ++ goto fail_dev; + } + -+ vdev = file->private_data; ++ if ((status = bce_vhci_create_message_queues(vhci))) ++ goto fail_mq; ++ if ((status = bce_vhci_create_event_queues(vhci))) ++ goto fail_eq; + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ vcmd = match_command(vdev, res.metatag); -+ if (!vcmd || vcmd->status != VHBA_REQ_SENT) { -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); -+ pr_debug("ctl dev #%u not expecting response\n", vdev->num); -+ return -EIO; ++ vhci->tq_state_wq = alloc_ordered_workqueue("bce-vhci-tq-state", 0); ++ INIT_WORK(&vhci->w_fw_events, bce_vhci_handle_firmware_events_w); ++ ++ vhci->hcd = usb_create_hcd(&bce_vhci_driver, vhci->vdev, "bce-vhci"); ++ if (!vhci->hcd) { ++ status = -ENOMEM; ++ goto fail_hcd; + } -+ vcmd->status = VHBA_REQ_WRITING; -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++ vhci->hcd->self.sysdev = &dev->pci->dev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ++ vhci->hcd->self.uses_dma = 1; ++#endif ++ *((struct bce_vhci **) vhci->hcd->hcd_priv) = vhci; ++ vhci->hcd->speed = HCD_USB2; + -+ ret = do_response(vdev, vcmd->metatag, vcmd->cmd, buf + sizeof(res), buf_len - sizeof(res), &res); ++ if ((status = usb_add_hcd(vhci->hcd, 0, 0))) ++ goto fail_hcd; + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ if (ret >= 0) { -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) -+ scsi_done(vcmd->cmd); -+#else -+ vcmd->cmd->scsi_done(vcmd->cmd); -+#endif -+ ret += sizeof(res); ++ return 0; + -+ /* don't compete with vhba_device_dequeue */ -+ if (!list_empty(&vcmd->entry)) { -+ list_del_init(&vcmd->entry); -+ vhba_free_command(vcmd); -+ } -+ } else { -+ vcmd->status = VHBA_REQ_SENT; -+ } ++fail_hcd: ++ bce_vhci_destroy_event_queues(vhci); ++fail_eq: ++ bce_vhci_destroy_message_queues(vhci); ++fail_mq: ++ device_destroy(bce_vhci_class, vhci->vdevt); ++fail_dev: ++ if (!status) ++ status = -EINVAL; ++ return status; ++} + -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); ++void bce_vhci_destroy(struct bce_vhci *vhci) ++{ ++ usb_remove_hcd(vhci->hcd); ++ bce_vhci_destroy_event_queues(vhci); ++ bce_vhci_destroy_message_queues(vhci); ++ device_destroy(bce_vhci_class, vhci->vdevt); ++} + -+ return ret; ++struct bce_vhci *bce_vhci_from_hcd(struct usb_hcd *hcd) ++{ ++ return *((struct bce_vhci **) hcd->hcd_priv); +} + -+static long vhba_ctl_ioctl (struct file *file, unsigned int cmd, unsigned long arg) ++int bce_vhci_start(struct usb_hcd *hcd) +{ 
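++ /* Start sequence, as implemented below: enable the controller (which
++  * reports a bitmask of usable ports), start it, then derive port_count
++  * from the position of the highest set bit in that mask. */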
-+ struct vhba_device *vdev = file->private_data; -+ struct vhba_host *vhost = platform_get_drvdata(&vhba_platform_device); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int status; ++ u16 port_mask = 0; ++ bce_vhci_port_t port_no = 0; ++ if ((status = bce_vhci_cmd_controller_enable(&vhci->cq, 1, &port_mask))) ++ return status; ++ vhci->port_mask = port_mask; ++ vhci->port_power_mask = 0; ++ if ((status = bce_vhci_cmd_controller_start(&vhci->cq))) ++ return status; ++ port_mask = vhci->port_mask; ++ while (port_mask) { ++ port_no += 1; ++ port_mask >>= 1; ++ } ++ vhci->port_count = port_no; ++ return 0; ++} + -+ switch (cmd) { -+ case 0xBEEF001: { -+ unsigned int ident[4]; /* host, channel, id, lun */ ++void bce_vhci_stop(struct usb_hcd *hcd) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_cmd_controller_disable(&vhci->cq); ++} + -+ ident[0] = vhost->shost->host_no; -+ devnum_to_bus_and_id(vdev->num, &ident[1], &ident[2]); -+ ident[3] = 0; /* lun */ ++static int bce_vhci_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ return 0; ++} + -+ if (copy_to_user((void *) arg, ident, sizeof(ident))) { -+ return -EFAULT; -+ } ++static int bce_vhci_reset_device(struct bce_vhci *vhci, int index, u16 timeout); ++ ++static int bce_vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int status; ++ struct usb_hub_descriptor *hd; ++ struct usb_hub_status *hs; ++ struct usb_port_status *ps; ++ u32 port_status; ++ // pr_info("bce-vhci: bce_vhci_hub_control %x %i %i [bufl=%i]\n", typeReq, wValue, wIndex, wLength); ++ if (typeReq == GetHubDescriptor && wLength >= sizeof(struct usb_hub_descriptor)) { ++ hd = (struct usb_hub_descriptor *) buf; ++ memset(hd, 0, sizeof(*hd)); ++ hd->bDescLength = sizeof(struct usb_hub_descriptor); ++ hd->bDescriptorType = USB_DT_HUB; ++ hd->bNbrPorts = (u8) vhci->port_count; ++ hd->wHubCharacteristics = HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_INDV_PORT_OCPM; ++ hd->bPwrOn2PwrGood = 0; ++ hd->bHubContrCurrent = 0; ++ return 0; ++ } else if (typeReq == GetHubStatus && wLength >= sizeof(struct usb_hub_status)) { ++ hs = (struct usb_hub_status *) buf; ++ memset(hs, 0, sizeof(*hs)); ++ hs->wHubStatus = 0; ++ hs->wHubChange = 0; ++ return 0; ++ } else if (typeReq == GetPortStatus && wLength >= 4 /* usb 2.0 */) { ++ ps = (struct usb_port_status *) buf; ++ ps->wPortStatus = 0; ++ ps->wPortChange = 0; + ++ if (vhci->port_power_mask & BIT(wIndex)) ++ ps->wPortStatus |= USB_PORT_STAT_POWER; ++ ++ if (!(bce_vhci_port_mask & BIT(wIndex))) + return 0; -+ } -+ case 0xBEEF002: { -+ unsigned int devnum = vdev->num; + -+ if (copy_to_user((void *) arg, &devnum, sizeof(devnum))) { -+ return -EFAULT; -+ } ++ if ((status = bce_vhci_cmd_port_status(&vhci->cq, (u8) wIndex, 0, &port_status))) ++ return status; ++ ++ if (port_status & 16) ++ ps->wPortStatus |= USB_PORT_STAT_ENABLE | USB_PORT_STAT_HIGH_SPEED; ++ if (port_status & 4) ++ ps->wPortStatus |= USB_PORT_STAT_CONNECTION; ++ if (port_status & 2) ++ ps->wPortStatus |= USB_PORT_STAT_OVERCURRENT; ++ if (port_status & 8) ++ ps->wPortStatus |= USB_PORT_STAT_RESET; ++ if (port_status & 0x60) ++ ps->wPortStatus |= USB_PORT_STAT_SUSPEND; + ++ if (port_status & 0x40000) ++ ps->wPortChange |= USB_PORT_STAT_C_CONNECTION; ++ ++ pr_debug("bce-vhci: Translated status %x to %x:%x\n", port_status, ps->wPortStatus, ps->wPortChange); ++ return 0; ++ } else if (typeReq == SetPortFeature) { ++ if (wValue == USB_PORT_FEAT_POWER) { ++ 
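++ /* Powering a port is forwarded to the firmware; on success the state is
++  * mirrored into port_power_mask for GetPortStatus (see comment below). */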
status = bce_vhci_cmd_port_power_on(&vhci->cq, (u8) wIndex); ++ /* As far as I am aware, power status is not part of the port status so store it separately */ ++ if (!status) ++ vhci->port_power_mask |= BIT(wIndex); ++ return status; ++ } ++ if (wValue == USB_PORT_FEAT_RESET) { ++ return bce_vhci_reset_device(vhci, wIndex, wValue); ++ } ++ if (wValue == USB_PORT_FEAT_SUSPEND) { ++ /* TODO: Am I supposed to also suspend the endpoints? */ ++ pr_debug("bce-vhci: Suspending port %i\n", wIndex); ++ return bce_vhci_cmd_port_suspend(&vhci->cq, (u8) wIndex); ++ } ++ } else if (typeReq == ClearPortFeature) { ++ if (wValue == USB_PORT_FEAT_ENABLE) ++ return bce_vhci_cmd_port_disable(&vhci->cq, (u8) wIndex); ++ if (wValue == USB_PORT_FEAT_POWER) { ++ status = bce_vhci_cmd_port_power_off(&vhci->cq, (u8) wIndex); ++ if (!status) ++ vhci->port_power_mask &= ~BIT(wIndex); ++ return status; ++ } ++ if (wValue == USB_PORT_FEAT_C_CONNECTION) ++ return bce_vhci_cmd_port_status(&vhci->cq, (u8) wIndex, 0x40000, &port_status); ++ if (wValue == USB_PORT_FEAT_C_RESET) { /* I don't think I can transfer it in any way */ + return 0; + } ++ if (wValue == USB_PORT_FEAT_SUSPEND) { ++ pr_debug("bce-vhci: Resuming port %i\n", wIndex); ++ return bce_vhci_cmd_port_resume(&vhci->cq, (u8) wIndex); ++ } + } -+ -+ return -ENOTTY; ++ pr_err("bce-vhci: bce_vhci_hub_control unhandled request: %x %i %i [bufl=%i]\n", typeReq, wValue, wIndex, wLength); ++ dump_stack(); ++ return -EIO; +} + -+#ifdef CONFIG_COMPAT -+static long vhba_ctl_compat_ioctl (struct file *file, unsigned int cmd, unsigned long arg) ++static int bce_vhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev) +{ -+ unsigned long compat_arg = (unsigned long)compat_ptr(arg); -+ return vhba_ctl_ioctl(file, cmd, compat_arg); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ struct bce_vhci_device *vdev; ++ bce_vhci_device_t devid; ++ pr_info("bce_vhci_enable_device\n"); ++ ++ if (vhci->port_to_device[udev->portnum]) ++ return 0; ++ ++ /* We need to early address the device */ ++ if (bce_vhci_cmd_device_create(&vhci->cq, udev->portnum, &devid)) ++ return -EIO; ++ ++ pr_info("bce_vhci_cmd_device_create %i -> %i\n", udev->portnum, devid); ++ ++ vdev = kzalloc(sizeof(struct bce_vhci_device), GFP_KERNEL); ++ vhci->port_to_device[udev->portnum] = devid; ++ vhci->devices[devid] = vdev; ++ ++ bce_vhci_create_transfer_queue(vhci, &vdev->tq[0], &udev->ep0, devid, DMA_BIDIRECTIONAL); ++ udev->ep0.hcpriv = &vdev->tq[0]; ++ vdev->tq_mask |= BIT(0); ++ ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &udev->ep0.desc); ++ return 0; +} -+#endif + -+static unsigned int vhba_ctl_poll (struct file *file, poll_table *wait) ++static int bce_vhci_address_device(struct usb_hcd *hcd, struct usb_device *udev, unsigned int timeout_ms) //TODO: follow timeout +{ -+ struct vhba_device *vdev = file->private_data; -+ unsigned int mask = 0; -+ unsigned long flags; ++ /* This is the same as enable_device, but instead in the old scheme */ ++ return bce_vhci_enable_device(hcd, udev); ++} + -+ poll_wait(file, &vdev->cmd_wq, wait); ++static void bce_vhci_free_device(struct usb_hcd *hcd, struct usb_device *udev) ++{ ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ int i; ++ bce_vhci_device_t devid; ++ struct bce_vhci_device *dev; ++ pr_info("bce_vhci_free_device %i\n", udev->portnum); ++ if (!vhci->port_to_device[udev->portnum]) ++ return; ++ devid = vhci->port_to_device[udev->portnum]; ++ dev = vhci->devices[devid]; ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ 
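++			/*
++			 * tq_mask holds one bit per endpoint slot: slots 0-15 are
++			 * OUT endpoints, 16-31 are IN endpoints
++			 * (bce_vhci_endpoint_index() folds the USB direction bit
++			 * into bit 4 of the slot index). Every live slot is torn
++			 * down in three steps: pause the transfer queue, destroy
++			 * the endpoint on the firmware side, then free the
++			 * host-side queue.
++			 */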
bce_vhci_transfer_queue_pause(&dev->tq[i], BCE_VHCI_PAUSE_SHUTDOWN); ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) i); ++ bce_vhci_destroy_transfer_queue(vhci, &dev->tq[i]); ++ } ++ } ++ vhci->devices[devid] = NULL; ++ vhci->port_to_device[udev->portnum] = 0; ++ bce_vhci_cmd_device_destroy(&vhci->cq, devid); ++ kfree(dev); ++} + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ if (next_command(vdev)) { -+ mask |= POLLIN | POLLRDNORM; ++static int bce_vhci_reset_device(struct bce_vhci *vhci, int index, u16 timeout) ++{ ++ struct bce_vhci_device *dev = NULL; ++ bce_vhci_device_t devid; ++ int i; ++ int status; ++ enum dma_data_direction dir; ++ pr_info("bce_vhci_reset_device %i\n", index); ++ ++ devid = vhci->port_to_device[index]; ++ if (devid) { ++ dev = vhci->devices[devid]; ++ ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ bce_vhci_transfer_queue_pause(&dev->tq[i], BCE_VHCI_PAUSE_SHUTDOWN); ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) i); ++ bce_vhci_destroy_transfer_queue(vhci, &dev->tq[i]); ++ } ++ } ++ vhci->devices[devid] = NULL; ++ vhci->port_to_device[index] = 0; ++ bce_vhci_cmd_device_destroy(&vhci->cq, devid); ++ } ++ status = bce_vhci_cmd_port_reset(&vhci->cq, (u8) index, timeout); ++ ++ if (dev) { ++ if ((status = bce_vhci_cmd_device_create(&vhci->cq, index, &devid))) ++ return status; ++ vhci->devices[devid] = dev; ++ vhci->port_to_device[index] = devid; ++ ++ for (i = 0; i < 32; i++) { ++ if (dev->tq_mask & BIT(i)) { ++ dir = usb_endpoint_dir_in(&dev->tq[i].endp->desc) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ if (i == 0) ++ dir = DMA_BIDIRECTIONAL; ++ bce_vhci_create_transfer_queue(vhci, &dev->tq[i], dev->tq[i].endp, devid, dir); ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &dev->tq[i].endp->desc); ++ } ++ } + } -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ return mask; ++ return status; +} + -+static int vhba_ctl_open (struct inode *inode, struct file *file) ++static int bce_vhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ -+ struct vhba_device *vdev; -+ int retval; ++ return 0; ++} + -+ pr_debug("ctl dev open\n"); ++static int bce_vhci_get_frame_number(struct usb_hcd *hcd) ++{ ++ return 0; ++} + -+ /* check if vhba is probed */ -+ if (!platform_get_drvdata(&vhba_platform_device)) { -+ return -ENODEV; -+ } ++static int bce_vhci_bus_suspend(struct usb_hcd *hcd) ++{ ++ int i, j; ++ int status; ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ pr_info("bce_vhci: suspend started\n"); + -+ vdev = vhba_device_alloc(); -+ if (!vdev) { -+ return -ENOMEM; ++ pr_info("bce_vhci: suspend endpoints\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ for (j = 0; j < 32; j++) { ++ if (!(vhci->devices[vhci->port_to_device[i]]->tq_mask & BIT(j))) ++ continue; ++ bce_vhci_transfer_queue_pause(&vhci->devices[vhci->port_to_device[i]]->tq[j], ++ BCE_VHCI_PAUSE_SUSPEND); ++ } + } + -+ vdev->kbuf_size = VHBA_KBUF_SIZE; -+ vdev->kbuf = kzalloc(vdev->kbuf_size, GFP_KERNEL); -+ if (!vdev->kbuf) { -+ return -ENOMEM; ++ pr_info("bce_vhci: suspend ports\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ bce_vhci_cmd_port_suspend(&vhci->cq, i); + } ++ pr_info("bce_vhci: suspend controller\n"); ++ if ((status = bce_vhci_cmd_controller_pause(&vhci->cq))) ++ return status; ++ ++ bce_vhci_event_queue_pause(&vhci->ev_commands); ++ bce_vhci_event_queue_pause(&vhci->ev_system); ++ bce_vhci_event_queue_pause(&vhci->ev_isochronous); ++ 
bce_vhci_event_queue_pause(&vhci->ev_interrupt); ++ bce_vhci_event_queue_pause(&vhci->ev_asynchronous); ++ pr_info("bce_vhci: suspend done\n"); ++ return 0; ++} + -+ if (!(retval = vhba_add_device(vdev))) { -+ file->private_data = vdev; ++static int bce_vhci_bus_resume(struct usb_hcd *hcd) ++{ ++ int i, j; ++ int status; ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ pr_info("bce_vhci: resume started\n"); ++ ++ bce_vhci_event_queue_resume(&vhci->ev_system); ++ bce_vhci_event_queue_resume(&vhci->ev_isochronous); ++ bce_vhci_event_queue_resume(&vhci->ev_interrupt); ++ bce_vhci_event_queue_resume(&vhci->ev_asynchronous); ++ bce_vhci_event_queue_resume(&vhci->ev_commands); ++ ++ pr_info("bce_vhci: resume controller\n"); ++ if ((status = bce_vhci_cmd_controller_start(&vhci->cq))) ++ return status; ++ ++ pr_info("bce_vhci: resume ports\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ bce_vhci_cmd_port_resume(&vhci->cq, i); ++ } ++ pr_info("bce_vhci: resume endpoints\n"); ++ for (i = 0; i < 16; i++) { ++ if (!vhci->port_to_device[i]) ++ continue; ++ for (j = 0; j < 32; j++) { ++ if (!(vhci->devices[vhci->port_to_device[i]]->tq_mask & BIT(j))) ++ continue; ++ bce_vhci_transfer_queue_resume(&vhci->devices[vhci->port_to_device[i]]->tq[j], ++ BCE_VHCI_PAUSE_SUSPEND); ++ } + } + -+ vhba_device_put(vdev); -+ -+ return retval; ++ pr_info("bce_vhci: resume done\n"); ++ return 0; +} + -+static int vhba_ctl_release (struct inode *inode, struct file *file) ++static int bce_vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) +{ -+ struct vhba_device *vdev; -+ struct vhba_command *vcmd; -+ unsigned long flags; ++ struct bce_vhci_transfer_queue *q = urb->ep->hcpriv; ++ pr_debug("bce_vhci_urb_enqueue %i:%x\n", q->dev_addr, urb->ep->desc.bEndpointAddress); ++ if (!q) ++ return -ENOENT; ++ return bce_vhci_urb_create(q, urb); ++} + -+ vdev = file->private_data; ++static int bce_vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) ++{ ++ struct bce_vhci_transfer_queue *q = urb->ep->hcpriv; ++ pr_debug("bce_vhci_urb_dequeue %x\n", urb->ep->desc.bEndpointAddress); ++ return bce_vhci_urb_request_cancel(q, urb, status); ++} + -+ pr_debug("ctl dev release\n"); ++static void bce_vhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) ++{ ++ struct bce_vhci_transfer_queue *q = ep->hcpriv; ++ pr_debug("bce_vhci_endpoint_reset\n"); ++ if (q) ++ bce_vhci_transfer_queue_request_reset(q); ++} + -+ vhba_device_get(vdev); -+ vhba_remove_device(vdev); ++static u8 bce_vhci_endpoint_index(u8 addr) ++{ ++ if (addr & 0x80) ++ return (u8) (0x10 + (addr & 0xf)); ++ return (u8) (addr & 0xf); ++} + -+ spin_lock_irqsave(&vdev->cmd_lock, flags); -+ list_for_each_entry(vcmd, &vdev->cmd_list, entry) { -+ WARN_ON(vcmd->status == VHBA_REQ_READING || vcmd->status == VHBA_REQ_WRITING); ++static int bce_vhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *endp) ++{ ++ u8 endp_index = bce_vhci_endpoint_index(endp->desc.bEndpointAddress); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_device_t devid = vhci->port_to_device[udev->portnum]; ++ struct bce_vhci_device *vdev = vhci->devices[devid]; ++ pr_debug("bce_vhci_add_endpoint %x/%x:%x\n", udev->portnum, devid, endp_index); + -+ scmd_dbg(vcmd->cmd, "device released with command %lu (%p)\n", vcmd->metatag, vcmd->cmd); -+ vcmd->cmd->result = DID_NO_CONNECT << 16; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0) -+ scsi_done(vcmd->cmd); -+#else -+ 
vcmd->cmd->scsi_done(vcmd->cmd); -+#endif -+ vhba_free_command(vcmd); ++ if (udev->bus->root_hub == udev) /* The USB hub */ ++ return 0; ++ if (vdev == NULL) ++ return -ENODEV; ++ if (vdev->tq_mask & BIT(endp_index)) { ++ endp->hcpriv = &vdev->tq[endp_index]; ++ return 0; + } -+ INIT_LIST_HEAD(&vdev->cmd_list); -+ spin_unlock_irqrestore(&vdev->cmd_lock, flags); + -+ kfree(vdev->kbuf); -+ vdev->kbuf = NULL; -+ -+ vhba_device_put(vdev); ++ bce_vhci_create_transfer_queue(vhci, &vdev->tq[endp_index], endp, devid, ++ usb_endpoint_dir_in(&endp->desc) ? DMA_FROM_DEVICE : DMA_TO_DEVICE); ++ endp->hcpriv = &vdev->tq[endp_index]; ++ vdev->tq_mask |= BIT(endp_index); + ++ bce_vhci_cmd_endpoint_create(&vhci->cq, devid, &endp->desc); + return 0; +} + -+static struct file_operations vhba_ctl_fops = { -+ .owner = THIS_MODULE, -+ .open = vhba_ctl_open, -+ .release = vhba_ctl_release, -+ .read = vhba_ctl_read, -+ .write = vhba_ctl_write, -+ .poll = vhba_ctl_poll, -+ .unlocked_ioctl = vhba_ctl_ioctl, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = vhba_ctl_compat_ioctl, -+#endif -+}; ++static int bce_vhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *endp) ++{ ++ u8 endp_index = bce_vhci_endpoint_index(endp->desc.bEndpointAddress); ++ struct bce_vhci *vhci = bce_vhci_from_hcd(hcd); ++ bce_vhci_device_t devid = vhci->port_to_device[udev->portnum]; ++ struct bce_vhci_transfer_queue *q = endp->hcpriv; ++ struct bce_vhci_device *vdev = vhci->devices[devid]; ++ pr_info("bce_vhci_drop_endpoint %x:%x\n", udev->portnum, endp_index); ++ if (!q) { ++ if (vdev && vdev->tq_mask & BIT(endp_index)) { ++ pr_err("something deleted the hcpriv?\n"); ++ q = &vdev->tq[endp_index]; ++ } else { ++ return 0; ++ } ++ } + -+static struct miscdevice vhba_miscdev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "vhba_ctl", -+ .fops = &vhba_ctl_fops, -+}; ++ bce_vhci_cmd_endpoint_destroy(&vhci->cq, devid, (u8) (endp->desc.bEndpointAddress & 0x8Fu)); ++ vhci->devices[devid]->tq_mask &= ~BIT(endp_index); ++ bce_vhci_destroy_transfer_queue(vhci, q); ++ return 0; ++} + -+static int vhba_probe (struct platform_device *pdev) ++static int bce_vhci_create_message_queues(struct bce_vhci *vhci) +{ -+ struct Scsi_Host *shost; -+ struct vhba_host *vhost; -+ int i; ++ if (bce_vhci_message_queue_create(vhci, &vhci->msg_commands, "VHC1HostCommands") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_system, "VHC1HostSystemEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_isochronous, "VHC1HostIsochronousEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_interrupt, "VHC1HostInterruptEvents") || ++ bce_vhci_message_queue_create(vhci, &vhci->msg_asynchronous, "VHC1HostAsynchronousEvents")) { ++ bce_vhci_destroy_message_queues(vhci); ++ return -EINVAL; ++ } ++ spin_lock_init(&vhci->msg_asynchronous_lock); ++ bce_vhci_command_queue_create(&vhci->cq, &vhci->msg_commands); ++ return 0; ++} + -+ vhba_can_queue = clamp(vhba_can_queue, 1, 256); ++static void bce_vhci_destroy_message_queues(struct bce_vhci *vhci) ++{ ++ bce_vhci_command_queue_destroy(&vhci->cq); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_commands); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_system); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_isochronous); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_interrupt); ++ bce_vhci_message_queue_destroy(vhci, &vhci->msg_asynchronous); ++} + -+ shost = scsi_host_alloc(&vhba_template, sizeof(struct vhba_host)); -+ if (!shost) { -+ return -ENOMEM; ++static void 
bce_vhci_handle_system_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++static void bce_vhci_handle_usb_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg); ++ ++static int bce_vhci_create_event_queues(struct bce_vhci *vhci) ++{ ++ vhci->ev_cq = bce_create_cq(vhci->dev, 0x100); ++ if (!vhci->ev_cq) ++ return -EINVAL; ++#define CREATE_EVENT_QUEUE(field, name, cb) bce_vhci_event_queue_create(vhci, &vhci->field, name, cb) ++ if (__bce_vhci_event_queue_create(vhci, &vhci->ev_commands, "VHC1FirmwareCommands", ++ bce_vhci_firmware_event_completion) || ++ CREATE_EVENT_QUEUE(ev_system, "VHC1FirmwareSystemEvents", bce_vhci_handle_system_event) || ++ CREATE_EVENT_QUEUE(ev_isochronous, "VHC1FirmwareIsochronousEvents", bce_vhci_handle_usb_event) || ++ CREATE_EVENT_QUEUE(ev_interrupt, "VHC1FirmwareInterruptEvents", bce_vhci_handle_usb_event) || ++ CREATE_EVENT_QUEUE(ev_asynchronous, "VHC1FirmwareAsynchronousEvents", bce_vhci_handle_usb_event)) { ++ bce_vhci_destroy_event_queues(vhci); ++ return -EINVAL; + } ++#undef CREATE_EVENT_QUEUE ++ return 0; ++} + -+ shost->max_channel = VHBA_MAX_BUS-1; -+ shost->max_id = VHBA_MAX_ID; -+ /* we don't support lun > 0 */ -+ shost->max_lun = 1; -+ shost->max_cmd_len = MAX_COMMAND_SIZE; -+ shost->can_queue = vhba_can_queue; -+ shost->cmd_per_lun = vhba_can_queue; ++static void bce_vhci_destroy_event_queues(struct bce_vhci *vhci) ++{ ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_commands); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_system); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_isochronous); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_interrupt); ++ bce_vhci_event_queue_destroy(vhci, &vhci->ev_asynchronous); ++ if (vhci->ev_cq) ++ bce_destroy_cq(vhci->dev, vhci->ev_cq); ++} + -+ vhost = (struct vhba_host *)shost->hostdata; -+ memset(vhost, 0, sizeof(struct vhba_host)); ++static void bce_vhci_send_fw_event_response(struct bce_vhci *vhci, struct bce_vhci_message *req, u16 status) ++{ ++ unsigned long timeout = 1000; ++ struct bce_vhci_message r = *req; ++ r.cmd = (u16) (req->cmd | 0x8000u); ++ r.status = status; ++ r.param1 = req->param1; ++ r.param2 = 0; + -+ vhost->shost = shost; -+ vhost->num_devices = 0; -+ spin_lock_init(&vhost->dev_lock); -+ spin_lock_init(&vhost->cmd_lock); -+ INIT_WORK(&vhost->scan_devices, vhba_scan_devices); -+ vhost->cmd_next = 0; -+ vhost->commands = kzalloc(vhba_can_queue * sizeof(struct vhba_command), GFP_KERNEL); -+ if (!vhost->commands) { -+ return -ENOMEM; ++ if (bce_reserve_submission(vhci->msg_system.sq, &timeout)) { ++ pr_err("bce-vhci: Cannot reserve submision for FW event reply\n"); ++ return; + } ++ bce_vhci_message_queue_write(&vhci->msg_system, &r); ++} + -+ for (i = 0; i < vhba_can_queue; i++) { -+ vhost->commands[i].status = VHBA_REQ_FREE; ++static int bce_vhci_handle_firmware_event(struct bce_vhci *vhci, struct bce_vhci_message *msg) ++{ ++ unsigned long flags; ++ bce_vhci_device_t devid; ++ u8 endp; ++ struct bce_vhci_device *dev; ++ struct bce_vhci_transfer_queue *tq; ++ if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE || msg->cmd == BCE_VHCI_CMD_ENDPOINT_SET_STATE) { ++ devid = (bce_vhci_device_t) (msg->param1 & 0xff); ++ endp = bce_vhci_endpoint_index((u8) ((msg->param1 >> 8) & 0xff)); ++ dev = vhci->devices[devid]; ++ if (!dev || !(dev->tq_mask & BIT(endp))) ++ return BCE_VHCI_BAD_ARGUMENT; ++ tq = &dev->tq[endp]; + } + -+ platform_set_drvdata(pdev, vhost); -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) -+ i = scsi_init_shared_tag_map(shost, 
vhba_can_queue); -+ if (i) return i; -+#endif -+ -+ if (scsi_add_host(shost, &pdev->dev)) { -+ scsi_host_put(shost); -+ return -ENOMEM; ++ if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_REQUEST_STATE) { ++ if (msg->param2 == BCE_VHCI_ENDPOINT_ACTIVE) { ++ bce_vhci_transfer_queue_resume(tq, BCE_VHCI_PAUSE_FIRMWARE); ++ return BCE_VHCI_SUCCESS; ++ } else if (msg->param2 == BCE_VHCI_ENDPOINT_PAUSED) { ++ bce_vhci_transfer_queue_pause(tq, BCE_VHCI_PAUSE_FIRMWARE); ++ return BCE_VHCI_SUCCESS; ++ } ++ return BCE_VHCI_BAD_ARGUMENT; ++ } else if (msg->cmd == BCE_VHCI_CMD_ENDPOINT_SET_STATE) { ++ if (msg->param2 == BCE_VHCI_ENDPOINT_STALLED) { ++ tq->state = msg->param2; ++ spin_lock_irqsave(&tq->urb_lock, flags); ++ tq->stalled = true; ++ spin_unlock_irqrestore(&tq->urb_lock, flags); ++ return BCE_VHCI_SUCCESS; ++ } ++ return BCE_VHCI_BAD_ARGUMENT; + } ++ pr_warn("bce-vhci: Unhandled firmware event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ return BCE_VHCI_BAD_ARGUMENT; ++} ++ ++static void bce_vhci_handle_firmware_events_w(struct work_struct *ws) ++{ ++ size_t cnt = 0; ++ int result; ++ struct bce_vhci *vhci = container_of(ws, struct bce_vhci, w_fw_events); ++ struct bce_queue_sq *sq = vhci->ev_commands.sq; ++ struct bce_sq_completion_data *cq; ++ struct bce_vhci_message *msg, *msg2 = NULL; ++ ++ while (true) { ++ if (msg2) { ++ msg = msg2; ++ msg2 = NULL; ++ } else if ((cq = bce_next_completion(sq))) { ++ if (cq->status == BCE_COMPLETION_ABORTED) { ++ bce_notify_submission_complete(sq); ++ continue; ++ } ++ msg = &vhci->ev_commands.data[sq->head]; ++ } else { ++ break; ++ } + -+ return 0; -+} ++ pr_debug("bce-vhci: Got fw event: %x s=%x p1=%x p2=%llx\n", msg->cmd, msg->status, msg->param1, msg->param2); ++ if ((cq = bce_next_completion(sq))) { ++ msg2 = &vhci->ev_commands.data[(sq->head + 1) % sq->el_count]; ++ pr_debug("bce-vhci: Got second fw event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ if (cq->status != BCE_COMPLETION_ABORTED && ++ msg2->cmd == (msg->cmd | 0x4000) && msg2->param1 == msg->param1) { ++ /* Take two elements */ ++ pr_debug("bce-vhci: Cancelled\n"); ++ bce_vhci_send_fw_event_response(vhci, msg, BCE_VHCI_ABORT); ++ ++ bce_notify_submission_complete(sq); ++ bce_notify_submission_complete(sq); ++ msg2 = NULL; ++ cnt += 2; ++ continue; ++ } + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0) -+static int vhba_remove (struct platform_device *pdev) -+#else -+static void vhba_remove (struct platform_device *pdev) -+#endif -+{ -+ struct vhba_host *vhost; -+ struct Scsi_Host *shost; ++ pr_warn("bce-vhci: Handle fw event - unexpected cancellation\n"); ++ } + -+ vhost = platform_get_drvdata(pdev); -+ shost = vhost->shost; ++ result = bce_vhci_handle_firmware_event(vhci, msg); ++ bce_vhci_send_fw_event_response(vhci, msg, (u16) result); + -+ scsi_remove_host(shost); -+ scsi_host_put(shost); + -+ kfree(vhost->commands); ++ bce_notify_submission_complete(sq); ++ ++cnt; ++ } ++ bce_vhci_event_queue_submit_pending(&vhci->ev_commands, cnt); ++ if (atomic_read(&sq->available_commands) == sq->el_count - 1) { ++ pr_debug("bce-vhci: complete\n"); ++ complete(&vhci->ev_commands.queue_empty_completion); ++ } ++} + -+#if LINUX_VERSION_CODE < KERNEL_VERSION(6, 11, 0) -+ return 0; -+#endif ++static void bce_vhci_firmware_event_completion(struct bce_queue_sq *sq) ++{ ++ struct bce_vhci_event_queue *q = sq->userdata; ++ queue_work(q->vhci->tq_state_wq, &q->vhci->w_fw_events); +} + -+static void vhba_release (struct device * dev) 
++static void bce_vhci_handle_system_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg) +{ -+ return; ++ if (msg->cmd & 0x8000) { ++ bce_vhci_command_queue_deliver_completion(&q->vhci->cq, msg); ++ } else { ++ pr_warn("bce-vhci: Unhandled system event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ } +} + -+static struct platform_device vhba_platform_device = { -+ .name = "vhba", -+ .id = -1, -+ .dev = { -+ .release = vhba_release, -+ }, -+}; ++static void bce_vhci_handle_usb_event(struct bce_vhci_event_queue *q, struct bce_vhci_message *msg) ++{ ++ bce_vhci_device_t devid; ++ u8 endp; ++ struct bce_vhci_device *dev; ++ if (msg->cmd & 0x8000) { ++ bce_vhci_command_queue_deliver_completion(&q->vhci->cq, msg); ++ } else if (msg->cmd == BCE_VHCI_CMD_TRANSFER_REQUEST || msg->cmd == BCE_VHCI_CMD_CONTROL_TRANSFER_STATUS) { ++ devid = (bce_vhci_device_t) (msg->param1 & 0xff); ++ endp = bce_vhci_endpoint_index((u8) ((msg->param1 >> 8) & 0xff)); ++ dev = q->vhci->devices[devid]; ++ if (!dev || (dev->tq_mask & BIT(endp)) == 0) { ++ pr_err("bce-vhci: Didn't find destination for transfer queue event\n"); ++ return; ++ } ++ bce_vhci_transfer_queue_event(&dev->tq[endp], msg); ++ } else { ++ pr_warn("bce-vhci: Unhandled USB event: %x s=%x p1=%x p2=%llx\n", ++ msg->cmd, msg->status, msg->param1, msg->param2); ++ } ++} + -+static struct platform_driver vhba_platform_driver = { -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = "vhba", -+ }, -+ .probe = vhba_probe, -+ .remove = vhba_remove, -+}; + -+static int __init vhba_init (void) -+{ -+ int ret; + -+ ret = platform_device_register(&vhba_platform_device); -+ if (ret < 0) { -+ return ret; -+ } ++static const struct hc_driver bce_vhci_driver = { ++ .description = "bce-vhci", ++ .product_desc = "BCE VHCI Host Controller", ++ .hcd_priv_size = sizeof(struct bce_vhci *), + -+ ret = platform_driver_register(&vhba_platform_driver); -+ if (ret < 0) { -+ platform_device_unregister(&vhba_platform_device); -+ return ret; -+ } ++#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) ++ .flags = HCD_USB2, ++#else ++ .flags = HCD_USB2 | HCD_DMA, ++#endif ++ ++ .start = bce_vhci_start, ++ .stop = bce_vhci_stop, ++ .hub_status_data = bce_vhci_hub_status_data, ++ .hub_control = bce_vhci_hub_control, ++ .urb_enqueue = bce_vhci_urb_enqueue, ++ .urb_dequeue = bce_vhci_urb_dequeue, ++ .enable_device = bce_vhci_enable_device, ++ .free_dev = bce_vhci_free_device, ++ .address_device = bce_vhci_address_device, ++ .add_endpoint = bce_vhci_add_endpoint, ++ .drop_endpoint = bce_vhci_drop_endpoint, ++ .endpoint_reset = bce_vhci_endpoint_reset, ++ .check_bandwidth = bce_vhci_check_bandwidth, ++ .get_frame_number = bce_vhci_get_frame_number, ++ .bus_suspend = bce_vhci_bus_suspend, ++ .bus_resume = bce_vhci_bus_resume ++}; + -+ ret = misc_register(&vhba_miscdev); -+ if (ret < 0) { -+ platform_driver_unregister(&vhba_platform_driver); -+ platform_device_unregister(&vhba_platform_device); -+ return ret; -+ } + ++int __init bce_vhci_module_init(void) ++{ ++ int result; ++ if ((result = alloc_chrdev_region(&bce_vhci_chrdev, 0, 1, "bce-vhci"))) ++ goto fail_chrdev; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0) ++ bce_vhci_class = class_create(THIS_MODULE, "bce-vhci"); ++#else ++ bce_vhci_class = class_create("bce-vhci"); ++#endif ++ if (IS_ERR(bce_vhci_class)) { ++ result = PTR_ERR(bce_vhci_class); ++ goto fail_class; ++ } + return 0; -+} + -+static void __exit vhba_exit(void) ++fail_class: ++ class_destroy(bce_vhci_class); ++fail_chrdev: ++ 
unregister_chrdev_region(bce_vhci_chrdev, 1); ++ if (!result) ++ result = -EINVAL; ++ return result; ++} ++void __exit bce_vhci_module_exit(void) +{ -+ misc_deregister(&vhba_miscdev); -+ platform_driver_unregister(&vhba_platform_driver); -+ platform_device_unregister(&vhba_platform_device); ++ class_destroy(bce_vhci_class); ++ unregister_chrdev_region(bce_vhci_chrdev, 1); +} + -+module_init(vhba_init); -+module_exit(vhba_exit); ++module_param_named(vhci_port_mask, bce_vhci_port_mask, ushort, 0444); ++MODULE_PARM_DESC(vhci_port_mask, "Specifies which VHCI ports are enabled"); +diff --git a/drivers/staging/apple-bce/vhci/vhci.h b/drivers/staging/apple-bce/vhci/vhci.h +new file mode 100644 +index 000000000000..6c2e22622f4c +--- /dev/null ++++ b/drivers/staging/apple-bce/vhci/vhci.h +@@ -0,0 +1,52 @@ ++#ifndef BCE_VHCI_H ++#define BCE_VHCI_H ++ ++#include "queue.h" ++#include "transfer.h" ++ ++struct usb_hcd; ++struct bce_queue_cq; ++ ++struct bce_vhci_device { ++ struct bce_vhci_transfer_queue tq[32]; ++ u32 tq_mask; ++}; ++struct bce_vhci { ++ struct apple_bce_device *dev; ++ dev_t vdevt; ++ struct device *vdev; ++ struct usb_hcd *hcd; ++ struct spinlock hcd_spinlock; ++ struct bce_vhci_message_queue msg_commands; ++ struct bce_vhci_message_queue msg_system; ++ struct bce_vhci_message_queue msg_isochronous; ++ struct bce_vhci_message_queue msg_interrupt; ++ struct bce_vhci_message_queue msg_asynchronous; ++ struct spinlock msg_asynchronous_lock; ++ struct bce_vhci_command_queue cq; ++ struct bce_queue_cq *ev_cq; ++ struct bce_vhci_event_queue ev_commands; ++ struct bce_vhci_event_queue ev_system; ++ struct bce_vhci_event_queue ev_isochronous; ++ struct bce_vhci_event_queue ev_interrupt; ++ struct bce_vhci_event_queue ev_asynchronous; ++ u16 port_mask; ++ u8 port_count; ++ u16 port_power_mask; ++ bce_vhci_device_t port_to_device[16]; ++ struct bce_vhci_device *devices[16]; ++ struct workqueue_struct *tq_state_wq; ++ struct work_struct w_fw_events; ++}; ++ ++int __init bce_vhci_module_init(void); ++void __exit bce_vhci_module_exit(void); ++ ++int bce_vhci_create(struct apple_bce_device *dev, struct bce_vhci *vhci); ++void bce_vhci_destroy(struct bce_vhci *vhci); ++int bce_vhci_start(struct usb_hcd *hcd); ++void bce_vhci_stop(struct usb_hcd *hcd); ++ ++struct bce_vhci *bce_vhci_from_hcd(struct usb_hcd *hcd); + ++#endif //BCE_VHCI_H diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index f699e5827ccb..99e86b983c77 100644 --- a/drivers/video/backlight/backlight.c @@ -17831,6 +29122,39 @@ index f699e5827ccb..99e86b983c77 100644 sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness"); } +diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h +index 428d81afe215..aa1604d92c1a 100644 +--- a/include/drm/drm_format_helper.h ++++ b/include/drm/drm_format_helper.h +@@ -96,6 +96,9 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_ + void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); ++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch, ++ const struct iosys_map *src, const struct drm_framebuffer *fb, ++ const struct drm_rect *clip, struct drm_format_conv_state *state); + void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer 
*fb, + const struct drm_rect *clip, struct drm_format_conv_state *state); +diff --git a/include/linux/mm.h b/include/linux/mm.h +index b1c3db9cf355..e38685ece897 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -206,6 +206,14 @@ static inline void __mm_zero_struct_page(struct page *page) + + extern int sysctl_max_map_count; + ++extern bool sysctl_workingset_protection; ++extern u8 sysctl_anon_min_ratio; ++extern u8 sysctl_clean_low_ratio; ++extern u8 sysctl_clean_min_ratio; ++int vm_workingset_protection_update_handler( ++ const struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos); ++ + extern unsigned long sysctl_user_reserve_kbytes; + extern unsigned long sysctl_admin_reserve_kbytes; + diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bcf0865a38ae..e748afb0ce06 100644 --- a/include/linux/pagemap.h @@ -19874,10 +31198,10 @@ index dbf896f3146c..4702cd2f1ffc 100644 /* * Sender's congestion state indicating normal or abnormal situations diff --git a/init/Kconfig b/init/Kconfig -index 7fe82a46e88c..7315826c6df3 100644 +index a20e6efd3f0f..9437171030e2 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -157,6 +157,10 @@ config THREAD_INFO_IN_TASK +@@ -154,6 +154,10 @@ config THREAD_INFO_IN_TASK menu "General setup" @@ -19888,7 +31212,7 @@ index 7fe82a46e88c..7315826c6df3 100644 config BROKEN bool -@@ -1324,6 +1328,22 @@ config USER_NS +@@ -1310,6 +1314,22 @@ config USER_NS If unsure, say N. @@ -19911,7 +31235,7 @@ index 7fe82a46e88c..7315826c6df3 100644 config PID_NS bool "PID Namespaces" default y -@@ -1466,6 +1486,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE +@@ -1452,6 +1472,12 @@ config CC_OPTIMIZE_FOR_PERFORMANCE with the "-O2" compiler flag for best performance and most helpful compile-time warnings. @@ -19980,7 +31304,7 @@ index 54ea59ff8fbe..18f87e0dd137 100644 help This option turns the kernel into a real-time kernel by replacing diff --git a/kernel/fork.c b/kernel/fork.c -index ded49f18cd95..94ccb32bc0b5 100644 +index 9b301180fd41..e919c8c3a121 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -106,6 +106,10 @@ @@ -19994,7 +31318,7 @@ index ded49f18cd95..94ccb32bc0b5 100644 #include #include #include -@@ -1511,12 +1515,13 @@ struct file *get_task_exe_file(struct task_struct *task) +@@ -1514,12 +1518,13 @@ struct file *get_task_exe_file(struct task_struct *task) struct file *exe_file = NULL; struct mm_struct *mm; @@ -20012,7 +31336,7 @@ index ded49f18cd95..94ccb32bc0b5 100644 task_unlock(task); return exe_file; } -@@ -2155,6 +2160,10 @@ __latent_entropy struct task_struct *copy_process( +@@ -2158,6 +2163,10 @@ __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -20023,7 +31347,7 @@ index ded49f18cd95..94ccb32bc0b5 100644 /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. 
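The copy_process() hunk above gates CLONE_NEWUSER behind the unprivileged_userns_clone flag, and the ksys_unshare() hunk just below applies the same check to unshare(2). A minimal userspace probe of the behaviour — assuming the flag is exposed as the kernel.unprivileged_userns_clone sysctl, as in the hardened patch set this change mirrors:

#define _GNU_SOURCE
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* With the (assumed) kernel.unprivileged_userns_clone sysctl set
	 * to 0, an unprivileged caller gets EPERM here instead of a new
	 * user namespace. */
	if (unshare(CLONE_NEWUSER) != 0) {
		fprintf(stderr, "unshare(CLONE_NEWUSER): %s\n", strerror(errno));
		return 1;
	}
	puts("new user namespace created");
	return 0;
}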
-@@ -3308,6 +3317,12 @@ int ksys_unshare(unsigned long unshare_flags) +@@ -3311,6 +3320,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; @@ -20127,7 +31451,7 @@ index 2ddb827e3bea..464049c4af3f 100644 return state; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c -index 7fee43426ee7..42ac6a9838c1 100644 +index 19813b387ef9..29f9cf31dd34 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -5206,9 +5206,9 @@ static void scx_dump_task(struct seq_buf *s, struct scx_dump_ctx *dctx, @@ -20143,10 +31467,10 @@ index 7fee43426ee7..42ac6a9838c1 100644 if (SCX_HAS_OP(dump_task)) { diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 1e78caa21436..7c82f3568bc9 100644 +index 26958431deb7..c532ffb153b4 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -76,10 +76,19 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; +@@ -73,10 +73,19 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG; * * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds) */ @@ -20166,7 +31490,7 @@ index 1e78caa21436..7c82f3568bc9 100644 static int __init setup_sched_thermal_decay_shift(char *str) { -@@ -124,8 +133,12 @@ int __weak arch_asym_cpu_priority(int cpu) +@@ -121,8 +130,12 @@ int __weak arch_asym_cpu_priority(int cpu) * * (default: 5 msec, units: microseconds) */ @@ -20179,7 +31503,7 @@ index 1e78caa21436..7c82f3568bc9 100644 #ifdef CONFIG_NUMA_BALANCING /* Restrict the NUMA promotion throughput (MB/s) for each target node. */ -@@ -9887,6 +9900,8 @@ struct sg_lb_stats { +@@ -9823,6 +9836,8 @@ struct sg_lb_stats { unsigned int group_weight; enum group_type group_type; unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */ @@ -20188,7 +31512,7 @@ index 1e78caa21436..7c82f3568bc9 100644 unsigned int group_smt_balance; /* Task on busy SMT be moved */ unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ #ifdef CONFIG_NUMA_BALANCING -@@ -10216,7 +10231,7 @@ sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group +@@ -10152,7 +10167,7 @@ sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group (sgs->group_weight - sgs->idle_cpus != 1)) return false; @@ -20197,7 +31521,7 @@ index 1e78caa21436..7c82f3568bc9 100644 } /* One group has more than one SMT CPU while the other group does not */ -@@ -10297,6 +10312,17 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) +@@ -10233,6 +10248,17 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) return check_cpu_capacity(rq, sd); } @@ -20215,7 +31539,14 @@ index 1e78caa21436..7c82f3568bc9 100644 /** * update_sg_lb_stats - Update sched_group's statistics for load balancing. * @env: The load balancing environment. 
-@@ -10319,6 +10345,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, +@@ -10249,11 +10275,13 @@ static inline void update_sg_lb_stats(struct lb_env *env, + bool *sg_overloaded, + bool *sg_overutilized) + { +- int i, nr_running, local_group; ++ int i, nr_running, local_group, sd_flags = env->sd->flags; ++ bool balancing_at_rd = !env->sd->parent; + memset(sgs, 0, sizeof(*sgs)); local_group = group == sds->local; @@ -20223,17 +31554,49 @@ index 1e78caa21436..7c82f3568bc9 100644 for_each_cpu_and(i, sched_group_span(group), env->cpus) { struct rq *rq = cpu_rq(i); -@@ -10332,6 +10359,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, +@@ -10267,16 +10295,12 @@ static inline void update_sg_lb_stats(struct lb_env *env, nr_running = rq->nr_running; sgs->sum_nr_running += nr_running; +- if (nr_running > 1) +- *sg_overloaded = 1; + if (sd_flags & SD_ASYM_PACKING) + update_sg_pick_asym_prefer(sgs, i); -+ + if (cpu_overutilized(i)) *sg_overutilized = 1; -@@ -10453,7 +10483,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, +-#ifdef CONFIG_NUMA_BALANCING +- sgs->nr_numa_running += rq->nr_numa_running; +- sgs->nr_preferred_running += rq->nr_preferred_running; +-#endif + /* + * No need to call idle_cpu() if nr_running is not 0 + */ +@@ -10286,10 +10310,21 @@ static inline void update_sg_lb_stats(struct lb_env *env, + continue; + } + ++ /* Overload indicator is only updated at root domain */ ++ if (balancing_at_rd && nr_running > 1) ++ *sg_overloaded = 1; ++ ++#ifdef CONFIG_NUMA_BALANCING ++ /* Only fbq_classify_group() uses this to classify NUMA groups */ ++ if (sd_flags & SD_NUMA) { ++ sgs->nr_numa_running += rq->nr_numa_running; ++ sgs->nr_preferred_running += rq->nr_preferred_running; ++ } ++#endif + if (local_group) + continue; + +- if (env->sd->flags & SD_ASYM_CPUCAPACITY) { ++ if (sd_flags & SD_ASYM_CPUCAPACITY) { + /* Check for a misfit task on the cpu */ + if (sgs->group_misfit_task_load < rq->misfit_task_load) { + sgs->group_misfit_task_load = rq->misfit_task_load; +@@ -10384,7 +10419,7 @@ static bool update_sd_pick_busiest(struct lb_env *env, case group_asym_packing: /* Prefer to move from lowest priority CPU's work */ @@ -20243,10 +31606,10 @@ index 1e78caa21436..7c82f3568bc9 100644 case group_misfit_task: /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index c7cf4cc57cdd..4fc59e4c9fb5 100644 +index c5d67a43fe52..dee2797009e3 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -2052,7 +2052,6 @@ struct sched_group { +@@ -2056,7 +2056,6 @@ struct sched_group { unsigned int group_weight; unsigned int cores; struct sched_group_capacity *sgc; @@ -20254,7 +31617,7 @@ index c7cf4cc57cdd..4fc59e4c9fb5 100644 int flags; /* -@@ -2816,7 +2815,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); +@@ -2820,7 +2819,7 @@ extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); extern void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags); @@ -20264,7 +31627,7 @@ index c7cf4cc57cdd..4fc59e4c9fb5 100644 #else # define SCHED_NR_MIGRATE_BREAK 32 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index da33ec9e94ab..32ecfc261259 100644 +index 9748a4c8d668..59b8157cb114 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1302,7 +1302,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd) @@ -20502,7 +31865,7 @@ index c00a86931f8c..d82213d68522 100644 COND_SYSCALL(mbind); COND_SYSCALL(get_mempolicy); diff --git a/kernel/sysctl.c 
b/kernel/sysctl.c -index 7ae7a4136855..98efcab489b4 100644 +index 5c9202cb8f59..de4ddf79fe97 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -80,6 +80,9 @@ @@ -20531,6 +31894,47 @@ index 7ae7a4136855..98efcab489b4 100644 #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", +@@ -2197,6 +2209,40 @@ static struct ctl_table vm_table[] = { + .extra1 = SYSCTL_ZERO, + }, + #endif ++ { ++ .procname = "workingset_protection", ++ .data = &sysctl_workingset_protection, ++ .maxlen = sizeof(bool), ++ .mode = 0644, ++ .proc_handler = &proc_dobool, ++ }, ++ { ++ .procname = "anon_min_ratio", ++ .data = &sysctl_anon_min_ratio, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = &vm_workingset_protection_update_handler, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE_HUNDRED, ++ }, ++ { ++ .procname = "clean_low_ratio", ++ .data = &sysctl_clean_low_ratio, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = &vm_workingset_protection_update_handler, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE_HUNDRED, ++ }, ++ { ++ .procname = "clean_min_ratio", ++ .data = &sysctl_clean_min_ratio, ++ .maxlen = sizeof(u8), ++ .mode = 0644, ++ .proc_handler = &vm_workingset_protection_update_handler, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE_HUNDRED, ++ }, + { + .procname = "user_reserve_kbytes", + .data = &sysctl_user_reserve_kbytes, diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index aa0b2e47f2f2..d74d857b1696 100644 --- a/kernel/user_namespace.c @@ -20549,6 +31953,112 @@ index aa0b2e47f2f2..d74d857b1696 100644 static struct kmem_cache *user_ns_cachep __ro_after_init; static DEFINE_MUTEX(userns_state_mutex); +diff --git a/lib/test_printf.c b/lib/test_printf.c +index 59dbe4f9a4cb..6fc82cb0b4cf 100644 +--- a/lib/test_printf.c ++++ b/lib/test_printf.c +@@ -779,18 +779,26 @@ static void __init fwnode_pointer(void) + static void __init fourcc_pointer(void) + { + struct { ++ char type; + u32 code; + char *str; + } const try[] = { +- { 0x3231564e, "NV12 little-endian (0x3231564e)", }, +- { 0xb231564e, "NV12 big-endian (0xb231564e)", }, +- { 0x10111213, ".... little-endian (0x10111213)", }, +- { 0x20303159, "Y10 little-endian (0x20303159)", }, ++ { 'c', 0x3231564e, "NV12 little-endian (0x3231564e)", }, ++ { 'c', 0xb231564e, "NV12 big-endian (0xb231564e)", }, ++ { 'c', 0x10111213, ".... 
little-endian (0x10111213)", }, ++ { 'c', 0x20303159, "Y10 little-endian (0x20303159)", }, ++ { 'h', 0x67503030, "gP00 (0x67503030)", }, ++ { 'r', 0x30305067, "gP00 (0x67503030)", }, ++ { 'l', cpu_to_le32(0x67503030), "gP00 (0x67503030)", }, ++ { 'b', cpu_to_be32(0x67503030), "gP00 (0x67503030)", }, + }; + unsigned int i; + +- for (i = 0; i < ARRAY_SIZE(try); i++) +- test(try[i].str, "%p4cc", &try[i].code); ++ for (i = 0; i < ARRAY_SIZE(try); i++) { ++ char fmt[] = { '%', 'p', '4', 'c', try[i].type, '\0' }; ++ ++ test(try[i].str, fmt, &try[i].code); ++ } + } + + static void __init +diff --git a/lib/vsprintf.c b/lib/vsprintf.c +index 9d3dac38a3f4..17926ad7863c 100644 +--- a/lib/vsprintf.c ++++ b/lib/vsprintf.c +@@ -1795,27 +1795,50 @@ char *fourcc_string(char *buf, char *end, const u32 *fourcc, + char output[sizeof("0123 little-endian (0x01234567)")]; + char *p = output; + unsigned int i; ++ bool pix_fmt = false; + u32 orig, val; + +- if (fmt[1] != 'c' || fmt[2] != 'c') ++ if (fmt[1] != 'c') + return error_string(buf, end, "(%p4?)", spec); + + if (check_pointer(&buf, end, fourcc, spec)) + return buf; + + orig = get_unaligned(fourcc); +- val = orig & ~BIT(31); ++ switch (fmt[2]) { ++ case 'h': ++ val = orig; ++ break; ++ case 'r': ++ val = orig = swab32(orig); ++ break; ++ case 'l': ++ val = orig = le32_to_cpu(orig); ++ break; ++ case 'b': ++ val = orig = be32_to_cpu(orig); ++ break; ++ case 'c': ++ /* Pixel formats are printed LSB-first */ ++ val = swab32(orig & ~BIT(31)); ++ pix_fmt = true; ++ break; ++ default: ++ return error_string(buf, end, "(%p4?)", spec); ++ } + + for (i = 0; i < sizeof(u32); i++) { +- unsigned char c = val >> (i * 8); ++ unsigned char c = val >> ((3 - i) * 8); + + /* Print non-control ASCII characters as-is, dot otherwise */ + *p++ = isascii(c) && isprint(c) ? c : '.'; + } + +- *p++ = ' '; +- strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian"); +- p += strlen(p); ++ if (pix_fmt) { ++ *p++ = ' '; ++ strcpy(p, orig & BIT(31) ? "big-endian" : "little-endian"); ++ p += strlen(p); ++ } + + *p++ = ' '; + *p++ = '('; +@@ -2379,6 +2402,7 @@ char *rust_fmt_argument(char *buf, char *end, void *ptr); + * read the documentation (path below) first. + * - 'NF' For a netdev_features_t + * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value. ++ * - '4c[hlbr]' Generic FourCC code. + * - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with + * a certain separator (' ' by default): + * C colon diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile index 20f08c644b71..464c410b2768 100644 --- a/lib/zstd/Makefile @@ -37571,10 +49081,80 @@ index 469fc3059be0..0ae819f0c927 100644 EXPORT_SYMBOL(zstd_reset_dstream); diff --git a/mm/Kconfig b/mm/Kconfig -index 84000b016808..d5067e65c5f7 100644 +index 84000b016808..1d96c5cc35d3 100644 --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -648,7 +648,7 @@ config COMPACTION +@@ -499,6 +499,69 @@ config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP + config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + bool + ++config ANON_MIN_RATIO ++ int "Default value for vm.anon_min_ratio" ++ depends on SYSCTL ++ range 0 100 ++ default 15 ++ help ++ This option sets the default value for vm.anon_min_ratio sysctl knob. ++ ++ The vm.anon_min_ratio sysctl knob provides *hard* protection of ++ anonymous pages. The anonymous pages on the current node won't be ++ reclaimed under any conditions when their amount is below ++ vm.anon_min_ratio. 
This knob may be used to prevent excessive swap
++	  thrashing when anonymous memory is low (for example, when memory is
++	  going to be overfilled by compressed data of the zram module).
++
++	  Setting this value too high (close to 100) can result in an
++	  inability to swap and can lead to early OOM under memory pressure.
++
++config CLEAN_LOW_RATIO
++	int "Default value for vm.clean_low_ratio"
++	depends on SYSCTL
++	range 0 100
++	default 0
++	help
++	  This option sets the default value for vm.clean_low_ratio sysctl knob.
++
++	  The vm.clean_low_ratio sysctl knob provides *best-effort*
++	  protection of clean file pages. The file pages on the current node
++	  won't be reclaimed under memory pressure when the amount of clean file
++	  pages is below vm.clean_low_ratio *unless* we threaten to OOM.
++	  Protection of clean file pages using this knob may be used when
++	  swapping is still possible to
++	  - prevent disk I/O thrashing under memory pressure;
++	  - improve performance in disk cache-bound tasks under memory
++	    pressure.
++
++	  Setting it to a high value may result in an early eviction of anonymous
++	  pages into the swap space by attempting to hold the protected amount
++	  of clean file pages in memory.
++
++config CLEAN_MIN_RATIO
++	int "Default value for vm.clean_min_ratio"
++	depends on SYSCTL
++	range 0 100
++	default 15
++	help
++	  This option sets the default value for vm.clean_min_ratio sysctl knob.
++
++	  The vm.clean_min_ratio sysctl knob provides *hard* protection of
++	  clean file pages. The file pages on the current node won't be
++	  reclaimed under memory pressure when the amount of clean file pages is
++	  below vm.clean_min_ratio. Hard protection of clean file pages using
++	  this knob may be used to
++	  - prevent disk I/O thrashing under memory pressure even with no free
++	    swap space;
++	  - improve performance in disk cache-bound tasks under memory
++	    pressure;
++	  - avoid high latency and prevent livelock in near-OOM conditions.
++
++	  Setting it to a high value may result in an early out-of-memory condition
++	  due to the inability to reclaim the protected amount of clean file pages
++	  when other types of pages cannot be reclaimed.
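The three ratios above surface as plain vm sysctls at runtime. A minimal userspace sketch for inspecting them — the /proc/sys/vm paths follow from the procnames wired into kernel/sysctl.c earlier in this diff, and exist only on a kernel carrying this patch:

#include <stdio.h>

int main(void)
{
	const char *knobs[] = {
		"/proc/sys/vm/workingset_protection",
		"/proc/sys/vm/anon_min_ratio",
		"/proc/sys/vm/clean_low_ratio",
		"/proc/sys/vm/clean_min_ratio",
	};
	char buf[32];

	for (unsigned i = 0; i < sizeof(knobs) / sizeof(knobs[0]); i++) {
		FILE *f = fopen(knobs[i], "r");	/* absent on unpatched kernels */

		if (!f) {
			perror(knobs[i]);
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", knobs[i], buf);
		fclose(f);
	}
	return 0;
}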
++ + config HAVE_MEMBLOCK_PHYS_MAP + bool + +@@ -648,7 +711,7 @@ config COMPACTION config COMPACT_UNEVICTABLE_DEFAULT int depends on COMPACTION @@ -37584,7 +49164,7 @@ index 84000b016808..d5067e65c5f7 100644 # diff --git a/mm/compaction.c b/mm/compaction.c -index a31c0f5758cf..20dac47673f1 100644 +index a2b16b08cbbf..48d611e58ad3 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1920,7 +1920,11 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE @@ -37627,6 +49207,18 @@ index 398c031be9ba..3d98aaf9b939 100644 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, end); hugetlb_zap_begin(vma, &range.start, &range.end); +diff --git a/mm/mm_init.c b/mm/mm_init.c +index 24b68b425afb..081ddb92db87 100644 +--- a/mm/mm_init.c ++++ b/mm/mm_init.c +@@ -2630,6 +2630,7 @@ static void __init mem_init_print_info(void) + , K(totalhigh_pages()) + #endif + ); ++ printk(KERN_INFO "le9 Unofficial (le9uo) working set protection 1.8 by Masahito Suzuki (forked from hakavlad's original le9 patch)"); + } + + /* diff --git a/mm/mmap.c b/mm/mmap.c index aec208f90337..d628b7900d2d 100644 --- a/mm/mmap.c @@ -37761,10 +49353,38 @@ index bd5183dfd879..3a410f53a07c 100644 /* diff --git a/mm/vmscan.c b/mm/vmscan.c -index 867a2554434a..4f2b44ef124c 100644 +index b1ec5ece067e..e258174d240a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -200,7 +200,11 @@ struct scan_control { +@@ -148,6 +148,15 @@ struct scan_control { + /* The file folios on the current node are dangerously low */ + unsigned int file_is_tiny:1; + ++ /* The anonymous pages on the current node are below vm.anon_min_ratio */ ++ unsigned int anon_below_min:1; ++ ++ /* The clean file pages on the current node are below vm.clean_low_ratio */ ++ unsigned int clean_below_low:1; ++ ++ /* The clean file pages on the current node are below vm.clean_min_ratio */ ++ unsigned int clean_below_min:1; ++ + /* Always discard instead of demoting to lower tier memory */ + unsigned int no_demotion:1; + +@@ -197,10 +206,23 @@ struct scan_control { + #define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0) + #endif + ++bool sysctl_workingset_protection __read_mostly = true; ++u8 sysctl_anon_min_ratio __read_mostly = CONFIG_ANON_MIN_RATIO; ++u8 sysctl_clean_low_ratio __read_mostly = CONFIG_CLEAN_LOW_RATIO; ++u8 sysctl_clean_min_ratio __read_mostly = CONFIG_CLEAN_MIN_RATIO; ++static u64 sysctl_anon_min_ratio_kb __read_mostly = 0; ++static u64 sysctl_clean_low_ratio_kb __read_mostly = 0; ++static u64 sysctl_clean_min_ratio_kb __read_mostly = 0; ++static u64 workingset_protection_prev_totalram __read_mostly = 0; ++ /* * From 0 .. MAX_SWAPPINESS. Higher means more swappy. */ @@ -37776,6 +49396,176 @@ index 867a2554434a..4f2b44ef124c 100644 #ifdef CONFIG_MEMCG +@@ -1097,6 +1119,10 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, + folio_mapped(folio) && folio_test_referenced(folio)) + goto keep_locked; + ++ if (folio_is_file_lru(folio) ? sc->clean_below_min : ++ (sc->anon_below_min && !sc->clean_below_min)) ++ goto keep_locked; ++ + /* + * The number of dirty pages determines if a node is marked + * reclaim_congested. kswapd will stall and start writing +@@ -2423,6 +2449,15 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + goto out; + } + ++ /* ++ * Force-scan anon if clean file pages is under vm.clean_low_ratio ++ * or vm.clean_min_ratio. 
++ */ ++ if (sc->clean_below_low || sc->clean_below_min) { ++ scan_balance = SCAN_ANON; ++ goto out; ++ } ++ + /* + * If there is enough inactive page cache, we do not reclaim + * anything from the anonymous working right now. +@@ -2567,6 +2602,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, + BUG(); + } + ++ /* ++ * Hard protection of the working set. ++ * Don't reclaim anon/file pages when the amount is ++ * below the watermark of the same type. ++ */ ++ if (file ? sc->clean_below_min : sc->anon_below_min) ++ scan = 0; ++ + nr[lru] = scan; + } + } +@@ -2586,6 +2629,96 @@ static bool can_age_anon_pages(struct pglist_data *pgdat, + return can_demote(pgdat->node_id, sc); + } + ++int vm_workingset_protection_update_handler(const struct ctl_table *table, int write, ++ void __user *buffer, size_t *lenp, loff_t *ppos) ++{ ++ int ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos); ++ if (ret || !write) ++ return ret; ++ ++ workingset_protection_prev_totalram = 0; ++ ++ return 0; ++} ++ ++static void prepare_workingset_protection(pg_data_t *pgdat, struct scan_control *sc) ++{ ++ unsigned long node_mem_total; ++ struct sysinfo i; ++ ++ if (!(sysctl_workingset_protection)) { ++ sc->anon_below_min = 0; ++ sc->clean_below_low = 0; ++ sc->clean_below_min = 0; ++ return; ++ } ++ ++ if (likely(sysctl_anon_min_ratio || ++ sysctl_clean_low_ratio || ++ sysctl_clean_min_ratio)) { ++#ifdef CONFIG_NUMA ++ si_meminfo_node(&i, pgdat->node_id); ++#else //CONFIG_NUMA ++ si_meminfo(&i); ++#endif //CONFIG_NUMA ++ node_mem_total = i.totalram; ++ ++ if (unlikely(workingset_protection_prev_totalram != node_mem_total)) { ++ sysctl_anon_min_ratio_kb = ++ node_mem_total * sysctl_anon_min_ratio / 100; ++ sysctl_clean_low_ratio_kb = ++ node_mem_total * sysctl_clean_low_ratio / 100; ++ sysctl_clean_min_ratio_kb = ++ node_mem_total * sysctl_clean_min_ratio / 100; ++ workingset_protection_prev_totalram = node_mem_total; ++ } ++ } ++ ++ /* ++ * Check the number of anonymous pages to protect them from ++ * reclaiming if their amount is below the specified. ++ */ ++ if (sysctl_anon_min_ratio) { ++ unsigned long reclaimable_anon; ++ ++ reclaimable_anon = ++ node_page_state(pgdat, NR_ACTIVE_ANON) + ++ node_page_state(pgdat, NR_INACTIVE_ANON) + ++ node_page_state(pgdat, NR_ISOLATED_ANON); ++ ++ sc->anon_below_min = reclaimable_anon < sysctl_anon_min_ratio_kb; ++ } else ++ sc->anon_below_min = 0; ++ ++ /* ++ * Check the number of clean file pages to protect them from ++ * reclaiming if their amount is below the specified. ++ */ ++ if (sysctl_clean_low_ratio || sysctl_clean_min_ratio) { ++ unsigned long reclaimable_file, dirty, clean; ++ ++ reclaimable_file = ++ node_page_state(pgdat, NR_ACTIVE_FILE) + ++ node_page_state(pgdat, NR_INACTIVE_FILE) + ++ node_page_state(pgdat, NR_ISOLATED_FILE); ++ dirty = node_page_state(pgdat, NR_FILE_DIRTY); ++ /* ++ * node_page_state() sum can go out of sync since ++ * all the values are not read at once. 
++ */ ++ if (likely(reclaimable_file > dirty)) ++ clean = reclaimable_file - dirty; ++ else ++ clean = 0; ++ ++ sc->clean_below_low = clean < sysctl_clean_low_ratio_kb; ++ sc->clean_below_min = clean < sysctl_clean_min_ratio_kb; ++ } else { ++ sc->clean_below_low = 0; ++ sc->clean_below_min = 0; ++ } ++} ++ + #ifdef CONFIG_LRU_GEN + + #ifdef CONFIG_LRU_GEN_ENABLED +@@ -4539,6 +4672,12 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw + */ + if (!swappiness) + type = LRU_GEN_FILE; ++ else if (sc->clean_below_min) ++ type = LRU_GEN_ANON; ++ else if (sc->anon_below_min) ++ type = LRU_GEN_FILE; ++ else if (sc->clean_below_low) ++ type = LRU_GEN_ANON; + else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) + type = LRU_GEN_ANON; + else if (swappiness == 1) +@@ -4829,6 +4968,8 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) + struct mem_cgroup *memcg = lruvec_memcg(lruvec); + struct pglist_data *pgdat = lruvec_pgdat(lruvec); + ++ prepare_workingset_protection(pgdat, sc); ++ + /* lru_gen_age_node() called mem_cgroup_calculate_protection() */ + if (mem_cgroup_below_min(NULL, memcg)) + return MEMCG_LRU_YOUNG; +@@ -5977,6 +6118,8 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) + + prepare_scan_control(pgdat, sc); + ++ prepare_workingset_protection(pgdat, sc); ++ + shrink_node_memcgs(pgdat, sc); + + flush_reclaim_state(sc); diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 6d2c97f8e9ef..ddc116ef22cb 100644 --- a/net/ipv4/Kconfig @@ -40879,6 +52669,19 @@ index b412ed88ccd9..d70f8b742b21 100644 tcp_mstamp_refresh(tcp_sk(sk)); event = icsk->icsk_pending; +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl +index 9eed3683ad76..7ddbf75f4c26 100755 +--- a/scripts/checkpatch.pl ++++ b/scripts/checkpatch.pl +@@ -6912,7 +6912,7 @@ sub process { + ($extension eq "f" && + defined $qualifier && $qualifier !~ /^w/) || + ($extension eq "4" && +- defined $qualifier && $qualifier !~ /^cc/)) { ++ defined $qualifier && $qualifier !~ /^c[chlbr]/)) { + $bad_specifier = $specifier; + last; + } diff --git a/scripts/package/PKGBUILD b/scripts/package/PKGBUILD index dca706617adc..89d3aef160b7 100644 --- a/scripts/package/PKGBUILD diff --git a/patches/linux/6.13/sched-core-2025-01-21.patch b/patches/linux/6.13/sched-core-2025-01-21.patch deleted file mode 100644 index ad83ae7..0000000 --- a/patches/linux/6.13/sched-core-2025-01-21.patch +++ /dev/null @@ -1,2457 +0,0 @@ -diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 262a94621446..05f5935eeac8 100644 ---- a/Documentation/admin-guide/kernel-parameters.txt -+++ b/Documentation/admin-guide/kernel-parameters.txt -@@ -2506,7 +2506,9 @@ - specified in the flag list (default: domain): - - nohz -- Disable the tick when a single task runs. -+ Disable the tick when a single task runs as well as -+ disabling other kernel noises like having RCU callbacks -+ offloaded. This is equivalent to the nohz_full parameter. 
- - A residual 1Hz tick is offloaded to workqueues, which you - need to affine to housekeeping through the global -diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst -index 7c2b16c4729d..caea83d91c67 100644 ---- a/Documentation/scheduler/sched-stats.rst -+++ b/Documentation/scheduler/sched-stats.rst -@@ -2,6 +2,12 @@ - Scheduler Statistics - ==================== - -+Version 17 of schedstats removed 'lb_imbalance' field as it has no -+significance anymore and instead added more relevant fields namely -+'lb_imbalance_load', 'lb_imbalance_util', 'lb_imbalance_task' and -+'lb_imbalance_misfit'. The domain field prints the name of the -+corresponding sched domain from this version onwards. -+ - Version 16 of schedstats changed the order of definitions within - 'enum cpu_idle_type', which changed the order of [CPU_MAX_IDLE_TYPES] - columns in show_schedstat(). In particular the position of CPU_IDLE -@@ -9,7 +15,9 @@ and __CPU_NOT_IDLE changed places. The size of the array is unchanged. - - Version 15 of schedstats dropped counters for some sched_yield: - yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is --identical to version 14. -+identical to version 14. Details are available at -+ -+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/scheduler/sched-stats.txt?id=1e1dbb259c79b - - Version 14 of schedstats includes support for sched_domains, which hit the - mainline kernel in 2.6.20 although it is identical to the stats from version -@@ -26,7 +34,14 @@ cpus on the machine, while domain0 is the most tightly focused domain, - sometimes balancing only between pairs of cpus. At this time, there - are no architectures which need more than three domain levels. The first - field in the domain stats is a bit map indicating which cpus are affected --by that domain. -+by that domain. Details are available at -+ -+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/sched-stats.txt?id=b762f3ffb797c -+ -+The schedstat documentation is maintained version 10 onwards and is not -+updated for version 11 and 12. The details for version 10 are available at -+ -+ https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/sched-stats.txt?id=1da177e4c3f4 - - These fields are counters, and only increment. Programs which make use - of these will need to start with a baseline observation and then calculate -@@ -71,88 +86,97 @@ Domain statistics - ----------------- - One of these is produced per domain for each cpu described. (Note that if - CONFIG_SMP is not defined, *no* domains are utilized and these lines --will not appear in the output.) -+will not appear in the output. is an extension to the domain field -+that prints the name of the corresponding sched domain. It can appear in -+schedstat version 17 and above, and requires CONFIG_SCHED_DEBUG.) - --domain 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 -+domain 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 - - The first field is a bit mask indicating what cpus this domain operates over. 
- --The next 24 are a variety of sched_balance_rq() statistics in grouped into types --of idleness (idle, busy, and newly idle): -+The next 33 are a variety of sched_balance_rq() statistics in grouped into types -+of idleness (busy, idle and newly idle): - - 1) # of times in this domain sched_balance_rq() was called when the -+ cpu was busy -+ 2) # of times in this domain sched_balance_rq() checked but found the -+ load did not require balancing when busy -+ 3) # of times in this domain sched_balance_rq() tried to move one or -+ more tasks and failed, when the cpu was busy -+ 4) Total imbalance in load when the cpu was busy -+ 5) Total imbalance in utilization when the cpu was busy -+ 6) Total imbalance in number of tasks when the cpu was busy -+ 7) Total imbalance due to misfit tasks when the cpu was busy -+ 8) # of times in this domain pull_task() was called when busy -+ 9) # of times in this domain pull_task() was called even though the -+ target task was cache-hot when busy -+ 10) # of times in this domain sched_balance_rq() was called but did not -+ find a busier queue while the cpu was busy -+ 11) # of times in this domain a busier queue was found while the cpu -+ was busy but no busier group was found -+ -+ 12) # of times in this domain sched_balance_rq() was called when the - cpu was idle -- 2) # of times in this domain sched_balance_rq() checked but found -+ 13) # of times in this domain sched_balance_rq() checked but found - the load did not require balancing when the cpu was idle -- 3) # of times in this domain sched_balance_rq() tried to move one or -+ 14) # of times in this domain sched_balance_rq() tried to move one or - more tasks and failed, when the cpu was idle -- 4) sum of imbalances discovered (if any) with each call to -- sched_balance_rq() in this domain when the cpu was idle -- 5) # of times in this domain pull_task() was called when the cpu -+ 15) Total imbalance in load when the cpu was idle -+ 16) Total imbalance in utilization when the cpu was idle -+ 17) Total imbalance in number of tasks when the cpu was idle -+ 18) Total imbalance due to misfit tasks when the cpu was idle -+ 19) # of times in this domain pull_task() was called when the cpu - was idle -- 6) # of times in this domain pull_task() was called even though -+ 20) # of times in this domain pull_task() was called even though - the target task was cache-hot when idle -- 7) # of times in this domain sched_balance_rq() was called but did -+ 21) # of times in this domain sched_balance_rq() was called but did - not find a busier queue while the cpu was idle -- 8) # of times in this domain a busier queue was found while the -+ 22) # of times in this domain a busier queue was found while the - cpu was idle but no busier group was found -- 9) # of times in this domain sched_balance_rq() was called when the -- cpu was busy -- 10) # of times in this domain sched_balance_rq() checked but found the -- load did not require balancing when busy -- 11) # of times in this domain sched_balance_rq() tried to move one or -- more tasks and failed, when the cpu was busy -- 12) sum of imbalances discovered (if any) with each call to -- sched_balance_rq() in this domain when the cpu was busy -- 13) # of times in this domain pull_task() was called when busy -- 14) # of times in this domain pull_task() was called even though the -- target task was cache-hot when busy -- 15) # of times in this domain sched_balance_rq() was called but did not -- find a busier queue while the cpu was busy -- 16) # of times in this domain a busier 
queue was found while the cpu -- was busy but no busier group was found - -- 17) # of times in this domain sched_balance_rq() was called when the -- cpu was just becoming idle -- 18) # of times in this domain sched_balance_rq() checked but found the -+ 23) # of times in this domain sched_balance_rq() was called when the -+ was just becoming idle -+ 24) # of times in this domain sched_balance_rq() checked but found the - load did not require balancing when the cpu was just becoming idle -- 19) # of times in this domain sched_balance_rq() tried to move one or more -+ 25) # of times in this domain sched_balance_rq() tried to move one or more - tasks and failed, when the cpu was just becoming idle -- 20) sum of imbalances discovered (if any) with each call to -- sched_balance_rq() in this domain when the cpu was just becoming idle -- 21) # of times in this domain pull_task() was called when newly idle -- 22) # of times in this domain pull_task() was called even though the -+ 26) Total imbalance in load when the cpu was just becoming idle -+ 27) Total imbalance in utilization when the cpu was just becoming idle -+ 28) Total imbalance in number of tasks when the cpu was just becoming idle -+ 29) Total imbalance due to misfit tasks when the cpu was just becoming idle -+ 30) # of times in this domain pull_task() was called when newly idle -+ 31) # of times in this domain pull_task() was called even though the - target task was cache-hot when just becoming idle -- 23) # of times in this domain sched_balance_rq() was called but did not -+ 32) # of times in this domain sched_balance_rq() was called but did not - find a busier queue while the cpu was just becoming idle -- 24) # of times in this domain a busier queue was found while the cpu -+ 33) # of times in this domain a busier queue was found while the cpu - was just becoming idle but no busier group was found - - Next three are active_load_balance() statistics: - -- 25) # of times active_load_balance() was called -- 26) # of times active_load_balance() tried to move a task and failed -- 27) # of times active_load_balance() successfully moved a task -+ 34) # of times active_load_balance() was called -+ 35) # of times active_load_balance() tried to move a task and failed -+ 36) # of times active_load_balance() successfully moved a task - - Next three are sched_balance_exec() statistics: - -- 28) sbe_cnt is not used -- 29) sbe_balanced is not used -- 30) sbe_pushed is not used -+ 37) sbe_cnt is not used -+ 38) sbe_balanced is not used -+ 39) sbe_pushed is not used - - Next three are sched_balance_fork() statistics: - -- 31) sbf_cnt is not used -- 32) sbf_balanced is not used -- 33) sbf_pushed is not used -+ 40) sbf_cnt is not used -+ 41) sbf_balanced is not used -+ 42) sbf_pushed is not used - - Next three are try_to_wake_up() statistics: - -- 34) # of times in this domain try_to_wake_up() awoke a task that -+ 43) # of times in this domain try_to_wake_up() awoke a task that - last ran on a different cpu in this domain -- 35) # of times in this domain try_to_wake_up() moved a task to the -+ 44) # of times in this domain try_to_wake_up() moved a task to the - waking cpu because it was cache-cold on its own cpu anyway -- 36) # of times in this domain try_to_wake_up() started passive balancing -+ 45) # of times in this domain try_to_wake_up() started passive balancing - - /proc//schedstat - --------------------- -diff --git a/include/linux/sched.h b/include/linux/sched.h -index 3c7eb16ab1d5..ac08431e238f 100644 ---- a/include/linux/sched.h -+++ 
b/include/linux/sched.h -@@ -944,6 +944,7 @@ struct task_struct { - unsigned sched_reset_on_fork:1; - unsigned sched_contributes_to_load:1; - unsigned sched_migrated:1; -+ unsigned sched_task_hot:1; - - /* Force alignment to the next boundary: */ - unsigned :0; -@@ -1374,6 +1375,15 @@ struct task_struct { - * with respect to preemption. - */ - unsigned long rseq_event_mask; -+# ifdef CONFIG_DEBUG_RSEQ -+ /* -+ * This is a place holder to save a copy of the rseq fields for -+ * validation of read-only fields. The struct rseq has a -+ * variable-length array at the end, so it cannot be used -+ * directly. Reserve a size large enough for the known fields. -+ */ -+ char rseq_fields[sizeof(struct rseq)]; -+# endif - #endif - - #ifdef CONFIG_SCHED_MM_CID -diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h -index 2b461129d1fa..d8501f4709b5 100644 ---- a/include/linux/sched/isolation.h -+++ b/include/linux/sched/isolation.h -@@ -7,16 +7,21 @@ - #include - - enum hk_type { -- HK_TYPE_TIMER, -- HK_TYPE_RCU, -- HK_TYPE_MISC, -- HK_TYPE_SCHED, -- HK_TYPE_TICK, - HK_TYPE_DOMAIN, -- HK_TYPE_WQ, - HK_TYPE_MANAGED_IRQ, -- HK_TYPE_KTHREAD, -- HK_TYPE_MAX -+ HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_MAX, -+ -+ /* -+ * The following housekeeping types are only set by the nohz_full -+ * boot commandline option. So they can share the same value. -+ */ -+ HK_TYPE_TICK = HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_TIMER = HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_RCU = HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_MISC = HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_WQ = HK_TYPE_KERNEL_NOISE, -+ HK_TYPE_KTHREAD = HK_TYPE_KERNEL_NOISE - }; - - #ifdef CONFIG_CPU_ISOLATION -diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h -index 4237daa5ac7a..7f3dbafe1817 100644 ---- a/include/linux/sched/topology.h -+++ b/include/linux/sched/topology.h -@@ -114,7 +114,10 @@ struct sched_domain { - unsigned int lb_count[CPU_MAX_IDLE_TYPES]; - unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; - unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; -- unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; -+ unsigned int lb_imbalance_load[CPU_MAX_IDLE_TYPES]; -+ unsigned int lb_imbalance_util[CPU_MAX_IDLE_TYPES]; -+ unsigned int lb_imbalance_task[CPU_MAX_IDLE_TYPES]; -+ unsigned int lb_imbalance_misfit[CPU_MAX_IDLE_TYPES]; - unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; - unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; -@@ -140,9 +143,7 @@ struct sched_domain { - unsigned int ttwu_move_affine; - unsigned int ttwu_move_balance; - #endif --#ifdef CONFIG_SCHED_DEBUG - char *name; --#endif - union { - void *private; /* used during construction */ - struct rcu_head rcu; /* used during destruction */ -@@ -198,18 +199,12 @@ struct sched_domain_topology_level { - int flags; - int numa_level; - struct sd_data data; --#ifdef CONFIG_SCHED_DEBUG - char *name; --#endif - }; - - extern void __init set_sched_topology(struct sched_domain_topology_level *tl); - --#ifdef CONFIG_SCHED_DEBUG - # define SD_INIT_NAME(type) .name = #type --#else --# define SD_INIT_NAME(type) --#endif - - #else /* CONFIG_SMP */ - -diff --git a/kernel/rseq.c b/kernel/rseq.c -index 9de6e35fe679..442aba29bc4c 100644 ---- a/kernel/rseq.c -+++ b/kernel/rseq.c -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - #include - - #define CREATE_TRACE_POINTS -@@ -25,6 +26,78 @@ - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL | \ - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE) - -+#ifdef CONFIG_DEBUG_RSEQ -+static struct rseq *rseq_kernel_fields(struct task_struct *t) -+{ 
-+ return (struct rseq *) t->rseq_fields; -+} -+ -+static int rseq_validate_ro_fields(struct task_struct *t) -+{ -+ static DEFINE_RATELIMIT_STATE(_rs, -+ DEFAULT_RATELIMIT_INTERVAL, -+ DEFAULT_RATELIMIT_BURST); -+ u32 cpu_id_start, cpu_id, node_id, mm_cid; -+ struct rseq __user *rseq = t->rseq; -+ -+ /* -+ * Validate fields which are required to be read-only by -+ * user-space. -+ */ -+ if (!user_read_access_begin(rseq, t->rseq_len)) -+ goto efault; -+ unsafe_get_user(cpu_id_start, &rseq->cpu_id_start, efault_end); -+ unsafe_get_user(cpu_id, &rseq->cpu_id, efault_end); -+ unsafe_get_user(node_id, &rseq->node_id, efault_end); -+ unsafe_get_user(mm_cid, &rseq->mm_cid, efault_end); -+ user_read_access_end(); -+ -+ if ((cpu_id_start != rseq_kernel_fields(t)->cpu_id_start || -+ cpu_id != rseq_kernel_fields(t)->cpu_id || -+ node_id != rseq_kernel_fields(t)->node_id || -+ mm_cid != rseq_kernel_fields(t)->mm_cid) && __ratelimit(&_rs)) { -+ -+ pr_warn("Detected rseq corruption for pid: %d, name: %s\n" -+ "\tcpu_id_start: %u ?= %u\n" -+ "\tcpu_id: %u ?= %u\n" -+ "\tnode_id: %u ?= %u\n" -+ "\tmm_cid: %u ?= %u\n", -+ t->pid, t->comm, -+ cpu_id_start, rseq_kernel_fields(t)->cpu_id_start, -+ cpu_id, rseq_kernel_fields(t)->cpu_id, -+ node_id, rseq_kernel_fields(t)->node_id, -+ mm_cid, rseq_kernel_fields(t)->mm_cid); -+ } -+ -+ /* For now, only print a console warning on mismatch. */ -+ return 0; -+ -+efault_end: -+ user_read_access_end(); -+efault: -+ return -EFAULT; -+} -+ -+static void rseq_set_ro_fields(struct task_struct *t, u32 cpu_id_start, u32 cpu_id, -+ u32 node_id, u32 mm_cid) -+{ -+ rseq_kernel_fields(t)->cpu_id_start = cpu_id; -+ rseq_kernel_fields(t)->cpu_id = cpu_id; -+ rseq_kernel_fields(t)->node_id = node_id; -+ rseq_kernel_fields(t)->mm_cid = mm_cid; -+} -+#else -+static int rseq_validate_ro_fields(struct task_struct *t) -+{ -+ return 0; -+} -+ -+static void rseq_set_ro_fields(struct task_struct *t, u32 cpu_id_start, u32 cpu_id, -+ u32 node_id, u32 mm_cid) -+{ -+} -+#endif -+ - /* - * - * Restartable sequences are a lightweight interface that allows -@@ -92,6 +165,11 @@ static int rseq_update_cpu_node_id(struct task_struct *t) - u32 node_id = cpu_to_node(cpu_id); - u32 mm_cid = task_mm_cid(t); - -+ /* -+ * Validate read-only rseq fields. -+ */ -+ if (rseq_validate_ro_fields(t)) -+ goto efault; - WARN_ON_ONCE((int) mm_cid < 0); - if (!user_write_access_begin(rseq, t->rseq_len)) - goto efault; -@@ -105,6 +183,7 @@ static int rseq_update_cpu_node_id(struct task_struct *t) - * t->rseq_len != ORIG_RSEQ_SIZE. - */ - user_write_access_end(); -+ rseq_set_ro_fields(t, cpu_id, cpu_id, node_id, mm_cid); - trace_rseq_update(t); - return 0; - -@@ -119,6 +198,11 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) - u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0, - mm_cid = 0; - -+ /* -+ * Validate read-only rseq fields. -+ */ -+ if (rseq_validate_ro_fields(t)) -+ return -EFAULT; - /* - * Reset cpu_id_start to its initial state (0). 
- */ -@@ -141,6 +225,9 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t) - */ - if (put_user(mm_cid, &t->rseq->mm_cid)) - return -EFAULT; -+ -+ rseq_set_ro_fields(t, cpu_id_start, cpu_id, node_id, mm_cid); -+ - /* - * Additional feature fields added after ORIG_RSEQ_SIZE - * need to be conditionally reset only if -@@ -423,6 +510,17 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len, - current->rseq = rseq; - current->rseq_len = rseq_len; - current->rseq_sig = sig; -+#ifdef CONFIG_DEBUG_RSEQ -+ /* -+ * Initialize the in-kernel rseq fields copy for validation of -+ * read-only fields. -+ */ -+ if (get_user(rseq_kernel_fields(current)->cpu_id_start, &rseq->cpu_id_start) || -+ get_user(rseq_kernel_fields(current)->cpu_id, &rseq->cpu_id) || -+ get_user(rseq_kernel_fields(current)->node_id, &rseq->node_id) || -+ get_user(rseq_kernel_fields(current)->mm_cid, &rseq->mm_cid)) -+ return -EFAULT; -+#endif - /* - * If rseq was previously inactive, and has just been - * registered, ensure the cpu_id_start and cpu_id fields -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 3e5a6bf587f9..4365b479e345 100644 ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -740,39 +740,43 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) - s64 __maybe_unused steal = 0, irq_delta = 0; - - #ifdef CONFIG_IRQ_TIME_ACCOUNTING -- irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; -+ if (irqtime_enabled()) { -+ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; - -- /* -- * Since irq_time is only updated on {soft,}irq_exit, we might run into -- * this case when a previous update_rq_clock() happened inside a -- * {soft,}IRQ region. -- * -- * When this happens, we stop ->clock_task and only update the -- * prev_irq_time stamp to account for the part that fit, so that a next -- * update will consume the rest. This ensures ->clock_task is -- * monotonic. -- * -- * It does however cause some slight miss-attribution of {soft,}IRQ -- * time, a more accurate solution would be to update the irq_time using -- * the current rq->clock timestamp, except that would require using -- * atomic ops. -- */ -- if (irq_delta > delta) -- irq_delta = delta; -+ /* -+ * Since irq_time is only updated on {soft,}irq_exit, we might run into -+ * this case when a previous update_rq_clock() happened inside a -+ * {soft,}IRQ region. -+ * -+ * When this happens, we stop ->clock_task and only update the -+ * prev_irq_time stamp to account for the part that fit, so that a next -+ * update will consume the rest. This ensures ->clock_task is -+ * monotonic. -+ * -+ * It does however cause some slight miss-attribution of {soft,}IRQ -+ * time, a more accurate solution would be to update the irq_time using -+ * the current rq->clock timestamp, except that would require using -+ * atomic ops. 
-+ */ -+ if (irq_delta > delta) -+ irq_delta = delta; - -- rq->prev_irq_time += irq_delta; -- delta -= irq_delta; -- delayacct_irq(rq->curr, irq_delta); -+ rq->prev_irq_time += irq_delta; -+ delta -= irq_delta; -+ delayacct_irq(rq->curr, irq_delta); -+ } - #endif - #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING - if (static_key_false((¶virt_steal_rq_enabled))) { -- steal = paravirt_steal_clock(cpu_of(rq)); -+ u64 prev_steal; -+ -+ steal = prev_steal = paravirt_steal_clock(cpu_of(rq)); - steal -= rq->prev_steal_time_rq; - - if (unlikely(steal > delta)) - steal = delta; - -- rq->prev_steal_time_rq += steal; -+ rq->prev_steal_time_rq = prev_steal; - delta -= steal; - } - #endif -@@ -1168,13 +1172,13 @@ int get_nohz_timer_target(void) - struct sched_domain *sd; - const struct cpumask *hk_mask; - -- if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { -+ if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) { - if (!idle_cpu(cpu)) - return cpu; - default_cpu = cpu; - } - -- hk_mask = housekeeping_cpumask(HK_TYPE_TIMER); -+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE); - - guard(rcu)(); - -@@ -1189,7 +1193,7 @@ int get_nohz_timer_target(void) - } - - if (default_cpu == -1) -- default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER); -+ default_cpu = housekeeping_any_cpu(HK_TYPE_KERNEL_NOISE); - - return default_cpu; - } -@@ -1341,7 +1345,7 @@ bool sched_can_stop_tick(struct rq *rq) - if (scx_enabled() && !scx_can_stop_tick(rq)) - return false; - -- if (rq->cfs.h_nr_running > 1) -+ if (rq->cfs.h_nr_queued > 1) - return false; - - /* -@@ -5632,7 +5636,7 @@ void sched_tick(void) - unsigned long hw_pressure; - u64 resched_latency; - -- if (housekeeping_cpu(cpu, HK_TYPE_TICK)) -+ if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) - arch_scale_freq_tick(); - - sched_clock_tick(); -@@ -5771,7 +5775,7 @@ static void sched_tick_start(int cpu) - int os; - struct tick_work *twork; - -- if (housekeeping_cpu(cpu, HK_TYPE_TICK)) -+ if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) - return; - - WARN_ON_ONCE(!tick_work_cpu); -@@ -5792,7 +5796,7 @@ static void sched_tick_stop(int cpu) - struct tick_work *twork; - int os; - -- if (housekeeping_cpu(cpu, HK_TYPE_TICK)) -+ if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) - return; - - WARN_ON_ONCE(!tick_work_cpu); -@@ -6018,7 +6022,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) - * opportunity to pull in more work from other CPUs. - */ - if (likely(!sched_class_above(prev->sched_class, &fair_sched_class) && -- rq->nr_running == rq->cfs.h_nr_running)) { -+ rq->nr_running == rq->cfs.h_nr_queued)) { - - p = pick_next_task_fair(rq, prev, rf); - if (unlikely(p == RETRY_TASK)) -@@ -6641,7 +6645,6 @@ static void __sched notrace __schedule(int sched_mode) - * as a preemption by schedule_debug() and RCU. 
- */ - bool preempt = sched_mode > SM_NONE; -- bool block = false; - unsigned long *switch_count; - unsigned long prev_state; - struct rq_flags rf; -@@ -6702,7 +6705,7 @@ static void __sched notrace __schedule(int sched_mode) - goto picked; - } - } else if (!preempt && prev_state) { -- block = try_to_block_task(rq, prev, prev_state); -+ try_to_block_task(rq, prev, prev_state); - switch_count = &prev->nvcsw; - } - -@@ -6748,7 +6751,8 @@ static void __sched notrace __schedule(int sched_mode) - - migrate_disable_switch(rq, prev); - psi_account_irqtime(rq, prev, next); -- psi_sched_switch(prev, next, block); -+ psi_sched_switch(prev, next, !task_on_rq_queued(prev) || -+ prev->se.sched_delayed); - - trace_sched_switch(preempt, prev, next, prev_state); - -@@ -8180,19 +8184,14 @@ static void cpuset_cpu_active(void) - cpuset_update_active_cpus(); - } - --static int cpuset_cpu_inactive(unsigned int cpu) -+static void cpuset_cpu_inactive(unsigned int cpu) - { - if (!cpuhp_tasks_frozen) { -- int ret = dl_bw_check_overflow(cpu); -- -- if (ret) -- return ret; - cpuset_update_active_cpus(); - } else { - num_cpus_frozen++; - partition_sched_domains(1, NULL, NULL); - } -- return 0; - } - - static inline void sched_smt_present_inc(int cpu) -@@ -8254,6 +8253,11 @@ int sched_cpu_deactivate(unsigned int cpu) - struct rq *rq = cpu_rq(cpu); - int ret; - -+ ret = dl_bw_deactivate(cpu); -+ -+ if (ret) -+ return ret; -+ - /* - * Remove CPU from nohz.idle_cpus_mask to prevent participating in - * load balancing when not active -@@ -8299,15 +8303,7 @@ int sched_cpu_deactivate(unsigned int cpu) - return 0; - - sched_update_numa(cpu, false); -- ret = cpuset_cpu_inactive(cpu); -- if (ret) { -- sched_smt_present_inc(cpu); -- sched_set_rq_online(rq, cpu); -- balance_push_set(cpu, false); -- set_cpu_active(cpu, true); -- sched_update_numa(cpu, true); -- return ret; -- } -+ cpuset_cpu_inactive(cpu); - sched_domains_numa_masks_clear(cpu); - return 0; - } -diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c -index 0bed0fa1acd9..5d9143dd0879 100644 ---- a/kernel/sched/cputime.c -+++ b/kernel/sched/cputime.c -@@ -9,6 +9,8 @@ - - #ifdef CONFIG_IRQ_TIME_ACCOUNTING - -+DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime); -+ - /* - * There are no locks covering percpu hardirq/softirq time. 
- * They are only modified in vtime_account, on corresponding CPU -@@ -22,16 +24,14 @@ - */ - DEFINE_PER_CPU(struct irqtime, cpu_irqtime); - --static int sched_clock_irqtime; -- - void enable_sched_clock_irqtime(void) - { -- sched_clock_irqtime = 1; -+ static_branch_enable(&sched_clock_irqtime); - } - - void disable_sched_clock_irqtime(void) - { -- sched_clock_irqtime = 0; -+ static_branch_disable(&sched_clock_irqtime); - } - - static void irqtime_account_delta(struct irqtime *irqtime, u64 delta, -@@ -57,7 +57,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset) - s64 delta; - int cpu; - -- if (!sched_clock_irqtime) -+ if (!irqtime_enabled()) - return; - - cpu = smp_processor_id(); -@@ -90,8 +90,6 @@ static u64 irqtime_tick_accounted(u64 maxtime) - - #else /* CONFIG_IRQ_TIME_ACCOUNTING */ - --#define sched_clock_irqtime (0) -- - static u64 irqtime_tick_accounted(u64 dummy) - { - return 0; -@@ -478,7 +476,7 @@ void account_process_tick(struct task_struct *p, int user_tick) - if (vtime_accounting_enabled_this_cpu()) - return; - -- if (sched_clock_irqtime) { -+ if (irqtime_enabled()) { - irqtime_account_process_tick(p, user_tick, 1); - return; - } -@@ -507,7 +505,7 @@ void account_idle_ticks(unsigned long ticks) - { - u64 cputime, steal; - -- if (sched_clock_irqtime) { -+ if (irqtime_enabled()) { - irqtime_account_idle_ticks(ticks); - return; - } -diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c -index d94f2ed6d1f4..62192ac79c30 100644 ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -342,6 +342,29 @@ static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_s - __add_rq_bw(new_bw, &rq->dl); - } - -+static __always_inline -+void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer) -+{ -+ /* -+ * If the timer callback was running (hrtimer_try_to_cancel == -1), -+ * it will eventually call put_task_struct(). -+ */ -+ if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se)) -+ put_task_struct(dl_task_of(dl_se)); -+} -+ -+static __always_inline -+void cancel_replenish_timer(struct sched_dl_entity *dl_se) -+{ -+ cancel_dl_timer(dl_se, &dl_se->dl_timer); -+} -+ -+static __always_inline -+void cancel_inactive_timer(struct sched_dl_entity *dl_se) -+{ -+ cancel_dl_timer(dl_se, &dl_se->inactive_timer); -+} -+ - static void dl_change_utilization(struct task_struct *p, u64 new_bw) - { - WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); -@@ -495,10 +518,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags) - * will not touch the rq's active utilization, - * so we are still safe. - */ -- if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) { -- if (!dl_server(dl_se)) -- put_task_struct(dl_task_of(dl_se)); -- } -+ cancel_inactive_timer(dl_se); - } else { - /* - * Since "dl_non_contending" is not set, the -@@ -2115,13 +2135,8 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) - * The replenish timer needs to be canceled. No - * problem if it fires concurrently: boosted threads - * are ignored in dl_task_timer(). -- * -- * If the timer callback was running (hrtimer_try_to_cancel == -1), -- * it will eventually call put_task_struct(). 
- */ -- if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 && -- !dl_server(&p->dl)) -- put_task_struct(p); -+ cancel_replenish_timer(&p->dl); - p->dl.dl_throttled = 0; - } - } else if (!dl_prio(p->normal_prio)) { -@@ -2289,8 +2304,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused - * will not touch the rq's active utilization, - * so we are still safe. - */ -- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) -- put_task_struct(p); -+ cancel_inactive_timer(&p->dl); - } - sub_rq_bw(&p->dl, &rq->dl); - rq_unlock(rq, &rf); -@@ -2506,16 +2520,13 @@ static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu - return NULL; - - next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root); -- --next_node: -- if (next_node) { -+ while (next_node) { - p = __node_2_pdl(next_node); - - if (task_is_pushable(rq, p, cpu)) - return p; - - next_node = rb_next(next_node); -- goto next_node; - } - - return NULL; -@@ -2964,11 +2975,22 @@ void dl_add_task_root_domain(struct task_struct *p) - - void dl_clear_root_domain(struct root_domain *rd) - { -- unsigned long flags; -+ int i; - -- raw_spin_lock_irqsave(&rd->dl_bw.lock, flags); -+ guard(raw_spinlock_irqsave)(&rd->dl_bw.lock); - rd->dl_bw.total_bw = 0; -- raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags); -+ -+ /* -+ * dl_server bandwidth is only restored when CPUs are attached to root -+ * domains (after domains are created or CPUs moved back to the -+ * default root doamin). -+ */ -+ for_each_cpu(i, rd->span) { -+ struct sched_dl_entity *dl_se = &cpu_rq(i)->fair_server; -+ -+ if (dl_server(dl_se) && cpu_active(i)) -+ rd->dl_bw.total_bw += dl_se->dl_bw; -+ } - } - - #endif /* CONFIG_SMP */ -@@ -3029,8 +3051,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) - */ - static void switched_to_dl(struct rq *rq, struct task_struct *p) - { -- if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) -- put_task_struct(p); -+ cancel_inactive_timer(&p->dl); - - /* - * In case a task is setscheduled to SCHED_DEADLINE we need to keep -@@ -3453,29 +3474,31 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, - } - - enum dl_bw_request { -- dl_bw_req_check_overflow = 0, -+ dl_bw_req_deactivate = 0, - dl_bw_req_alloc, - dl_bw_req_free - }; - - static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) - { -- unsigned long flags; -+ unsigned long flags, cap; - struct dl_bw *dl_b; - bool overflow = 0; -+ u64 fair_server_bw = 0; - - rcu_read_lock_sched(); - dl_b = dl_bw_of(cpu); - raw_spin_lock_irqsave(&dl_b->lock, flags); - -- if (req == dl_bw_req_free) { -+ cap = dl_bw_capacity(cpu); -+ switch (req) { -+ case dl_bw_req_free: - __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); -- } else { -- unsigned long cap = dl_bw_capacity(cpu); -- -+ break; -+ case dl_bw_req_alloc: - overflow = __dl_overflow(dl_b, cap, 0, dl_bw); - -- if (req == dl_bw_req_alloc && !overflow) { -+ if (!overflow) { - /* - * We reserve space in the destination - * root_domain, as we can't fail after this point. -@@ -3484,6 +3507,42 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) - */ - __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); - } -+ break; -+ case dl_bw_req_deactivate: -+ /* -+ * cpu is not off yet, but we need to do the math by -+ * considering it off already (i.e., what would happen if we -+ * turn cpu off?). -+ */ -+ cap -= arch_scale_cpu_capacity(cpu); -+ -+ /* -+ * cpu is going offline and NORMAL tasks will be moved away -+ * from it. 
We can thus discount dl_server bandwidth -+ * contribution as it won't need to be servicing tasks after -+ * the cpu is off. -+ */ -+ if (cpu_rq(cpu)->fair_server.dl_server) -+ fair_server_bw = cpu_rq(cpu)->fair_server.dl_bw; -+ -+ /* -+ * Not much to check if no DEADLINE bandwidth is present. -+ * dl_servers we can discount, as tasks will be moved out the -+ * offlined CPUs anyway. -+ */ -+ if (dl_b->total_bw - fair_server_bw > 0) { -+ /* -+ * Leaving at least one CPU for DEADLINE tasks seems a -+ * wise thing to do. As said above, cpu is not offline -+ * yet, so account for that. -+ */ -+ if (dl_bw_cpus(cpu) - 1) -+ overflow = __dl_overflow(dl_b, cap, fair_server_bw, 0); -+ else -+ overflow = 1; -+ } -+ -+ break; - } - - raw_spin_unlock_irqrestore(&dl_b->lock, flags); -@@ -3492,9 +3551,9 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) - return overflow ? -EBUSY : 0; - } - --int dl_bw_check_overflow(int cpu) -+int dl_bw_deactivate(int cpu) - { -- return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0); -+ return dl_bw_manage(dl_bw_req_deactivate, cpu, 0); - } - - int dl_bw_alloc(int cpu, u64 dl_bw) -diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c -index a1be00a988bf..fd7e85220715 100644 ---- a/kernel/sched/debug.c -+++ b/kernel/sched/debug.c -@@ -379,7 +379,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu - return -EINVAL; - } - -- if (rq->cfs.h_nr_running) { -+ if (rq->cfs.h_nr_queued) { - update_rq_clock(rq); - dl_server_stop(&rq->fair_server); - } -@@ -392,7 +392,7 @@ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubu - printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n", - cpu_of(rq)); - -- if (rq->cfs.h_nr_running) -+ if (rq->cfs.h_nr_queued) - dl_server_start(&rq->fair_server); - } - -@@ -843,13 +843,10 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) - SPLIT_NS(right_vruntime)); - spread = right_vruntime - left_vruntime; - SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); -- SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); -- SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running); -- SEQ_printf(m, " .%-30s: %d\n", "h_nr_delayed", cfs_rq->h_nr_delayed); -- SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running", -- cfs_rq->idle_nr_running); -- SEQ_printf(m, " .%-30s: %d\n", "idle_h_nr_running", -- cfs_rq->idle_h_nr_running); -+ SEQ_printf(m, " .%-30s: %d\n", "nr_queued", cfs_rq->nr_queued); -+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_runnable", cfs_rq->h_nr_runnable); -+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_queued", cfs_rq->h_nr_queued); -+ SEQ_printf(m, " .%-30s: %d\n", "h_nr_idle", cfs_rq->h_nr_idle); - SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); - #ifdef CONFIG_SMP - SEQ_printf(m, " .%-30s: %lu\n", "load_avg", -@@ -1295,8 +1292,10 @@ void resched_latency_warn(int cpu, u64 latency) - { - static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1); - -- WARN(__ratelimit(&latency_check_ratelimit), -- "sched: CPU %d need_resched set for > %llu ns (%d ticks) " -- "without schedule\n", -- cpu, latency, cpu_rq(cpu)->ticks_without_resched); -+ if (likely(!__ratelimit(&latency_check_ratelimit))) -+ return; -+ -+ pr_err("sched: CPU %d need_resched set for > %llu ns (%d ticks) without schedule\n", -+ cpu, latency, cpu_rq(cpu)->ticks_without_resched); -+ dump_stack(); - } -diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index 26958431deb7..1e78caa21436 100644 ---- 
a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -37,6 +37,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -51,6 +52,8 @@ - - #include - -+#include -+ - #include "sched.h" - #include "stats.h" - #include "autogroup.h" -@@ -523,7 +526,7 @@ void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec); - * Scheduling class tree data structure manipulation methods: - */ - --static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) -+static inline __maybe_unused u64 max_vruntime(u64 max_vruntime, u64 vruntime) - { - s64 delta = (s64)(vruntime - max_vruntime); - if (delta > 0) -@@ -532,7 +535,7 @@ static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime) - return max_vruntime; - } - --static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime) -+static inline __maybe_unused u64 min_vruntime(u64 min_vruntime, u64 vruntime) - { - s64 delta = (s64)(vruntime - min_vruntime); - if (delta < 0) -@@ -910,7 +913,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq) - * We can safely skip eligibility check if there is only one entity - * in this cfs_rq, saving some cycles. - */ -- if (cfs_rq->nr_running == 1) -+ if (cfs_rq->nr_queued == 1) - return curr && curr->on_rq ? curr : se; - - if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr))) -@@ -1245,7 +1248,7 @@ static void update_curr(struct cfs_rq *cfs_rq) - - account_cfs_rq_runtime(cfs_rq, delta_exec); - -- if (cfs_rq->nr_running == 1) -+ if (cfs_rq->nr_queued == 1) - return; - - if (resched || did_preempt_short(cfs_rq, curr)) { -@@ -2126,7 +2129,7 @@ static void update_numa_stats(struct task_numa_env *env, - ns->load += cpu_load(rq); - ns->runnable += cpu_runnable(rq); - ns->util += cpu_util_cfs(cpu); -- ns->nr_running += rq->cfs.h_nr_running; -+ ns->nr_running += rq->cfs.h_nr_runnable; - ns->compute_capacity += capacity_of(cpu); - - if (find_idle && idle_core < 0 && !rq->nr_running && idle_cpu(cpu)) { -@@ -3677,9 +3680,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) - list_add(&se->group_node, &rq->cfs_tasks); - } - #endif -- cfs_rq->nr_running++; -- if (se_is_idle(se)) -- cfs_rq->idle_nr_running++; -+ cfs_rq->nr_queued++; - } - - static void -@@ -3692,9 +3693,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) - list_del_init(&se->group_node); - } - #endif -- cfs_rq->nr_running--; -- if (se_is_idle(se)) -- cfs_rq->idle_nr_running--; -+ cfs_rq->nr_queued--; - } - - /* -@@ -5128,7 +5127,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) - - static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) - { -- return !cfs_rq->nr_running; -+ return !cfs_rq->nr_queued; - } - - #define UPDATE_TG 0x0 -@@ -5166,6 +5165,22 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} - - #endif /* CONFIG_SMP */ - -+void __setparam_fair(struct task_struct *p, const struct sched_attr *attr) -+{ -+ struct sched_entity *se = &p->se; -+ -+ p->static_prio = NICE_TO_PRIO(attr->sched_nice); -+ if (attr->sched_runtime) { -+ se->custom_slice = 1; -+ se->slice = clamp_t(u64, attr->sched_runtime, -+ NSEC_PER_MSEC/10, /* HZ=1000 * 10 */ -+ NSEC_PER_MSEC*100); /* HZ=100 / 10 */ -+ } else { -+ se->custom_slice = 0; -+ se->slice = sysctl_sched_base_slice; -+ } -+} -+ - static void - place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - { -@@ -5184,7 +5199,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - * - * EEVDF: placement strategy #1 / #2 - */ -- if 
(sched_feat(PLACE_LAG) && cfs_rq->nr_running && se->vlag) { -+ if (sched_feat(PLACE_LAG) && cfs_rq->nr_queued && se->vlag) { - struct sched_entity *curr = cfs_rq->curr; - unsigned long load; - -@@ -5277,8 +5292,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - static void check_enqueue_throttle(struct cfs_rq *cfs_rq); - static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq); - --static inline bool cfs_bandwidth_used(void); -- - static void - requeue_delayed_entity(struct sched_entity *se); - -@@ -5300,7 +5313,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - * When enqueuing a sched_entity, we must: - * - Update loads to have both entity and cfs_rq synced with now. - * - For group_entity, update its runnable_weight to reflect the new -- * h_nr_running of its group cfs_rq. -+ * h_nr_runnable of its group cfs_rq. - * - For group_entity, update its weight to reflect the new share of - * its group cfs_rq - * - Add its new weight to cfs_rq->load.weight -@@ -5333,7 +5346,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - __enqueue_entity(cfs_rq, se); - se->on_rq = 1; - -- if (cfs_rq->nr_running == 1) { -+ if (cfs_rq->nr_queued == 1) { - check_enqueue_throttle(cfs_rq); - if (!throttled_hierarchy(cfs_rq)) { - list_add_leaf_cfs_rq(cfs_rq); -@@ -5375,7 +5388,7 @@ static void set_delayed(struct sched_entity *se) - for_each_sched_entity(se) { - struct cfs_rq *cfs_rq = cfs_rq_of(se); - -- cfs_rq->h_nr_delayed++; -+ cfs_rq->h_nr_runnable--; - if (cfs_rq_throttled(cfs_rq)) - break; - } -@@ -5387,7 +5400,7 @@ static void clear_delayed(struct sched_entity *se) - for_each_sched_entity(se) { - struct cfs_rq *cfs_rq = cfs_rq_of(se); - -- cfs_rq->h_nr_delayed--; -+ cfs_rq->h_nr_runnable++; - if (cfs_rq_throttled(cfs_rq)) - break; - } -@@ -5404,6 +5417,7 @@ static bool - dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - { - bool sleep = flags & DEQUEUE_SLEEP; -+ int action = UPDATE_TG; - - update_curr(cfs_rq); - clear_buddies(cfs_rq, se); -@@ -5429,7 +5443,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - } - } - -- int action = UPDATE_TG; - if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) - action |= DO_DETACH; - -@@ -5437,7 +5450,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - * When dequeuing a sched_entity, we must: - * - Update loads to have both entity and cfs_rq synced with now. - * - For group_entity, update its runnable_weight to reflect the new -- * h_nr_running of its group cfs_rq. -+ * h_nr_runnable of its group cfs_rq. - * - Subtract its previous weight from cfs_rq->load.weight. - * - For group entity, update its weight to reflect the new share - * of its group cfs_rq. -@@ -5475,7 +5488,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) - if (flags & DEQUEUE_DELAYED) - finish_delayed_dequeue_entity(se); - -- if (cfs_rq->nr_running == 0) -+ if (cfs_rq->nr_queued == 0) - update_idle_cfs_rq_clock_pelt(cfs_rq); - - return true; -@@ -5537,17 +5550,19 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags); - static struct sched_entity * - pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq) - { -+ struct sched_entity *se; -+ - /* -- * Enabling NEXT_BUDDY will affect latency but not fairness. -+ * Picking the ->next buddy will affect latency but not fairness. 
- */ -- if (sched_feat(NEXT_BUDDY) && -+ if (sched_feat(PICK_BUDDY) && - cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) { - /* ->next will never be delayed */ - SCHED_WARN_ON(cfs_rq->next->sched_delayed); - return cfs_rq->next; - } - -- struct sched_entity *se = pick_eevdf(cfs_rq); -+ se = pick_eevdf(cfs_rq); - if (se->sched_delayed) { - dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); - /* -@@ -5823,7 +5838,7 @@ static int tg_throttle_down(struct task_group *tg, void *data) - list_del_leaf_cfs_rq(cfs_rq); - - SCHED_WARN_ON(cfs_rq->throttled_clock_self); -- if (cfs_rq->nr_running) -+ if (cfs_rq->nr_queued) - cfs_rq->throttled_clock_self = rq_clock(rq); - } - cfs_rq->throttle_count++; -@@ -5836,8 +5851,8 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) - struct rq *rq = rq_of(cfs_rq); - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); - struct sched_entity *se; -- long task_delta, idle_task_delta, delayed_delta, dequeue = 1; -- long rq_h_nr_running = rq->cfs.h_nr_running; -+ long queued_delta, runnable_delta, idle_delta, dequeue = 1; -+ long rq_h_nr_queued = rq->cfs.h_nr_queued; - - raw_spin_lock(&cfs_b->lock); - /* This will start the period timer if necessary */ -@@ -5867,9 +5882,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) - walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); - rcu_read_unlock(); - -- task_delta = cfs_rq->h_nr_running; -- idle_task_delta = cfs_rq->idle_h_nr_running; -- delayed_delta = cfs_rq->h_nr_delayed; -+ queued_delta = cfs_rq->h_nr_queued; -+ runnable_delta = cfs_rq->h_nr_runnable; -+ idle_delta = cfs_rq->h_nr_idle; - for_each_sched_entity(se) { - struct cfs_rq *qcfs_rq = cfs_rq_of(se); - int flags; -@@ -5889,11 +5904,11 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) - dequeue_entity(qcfs_rq, se, flags); - - if (cfs_rq_is_idle(group_cfs_rq(se))) -- idle_task_delta = cfs_rq->h_nr_running; -+ idle_delta = cfs_rq->h_nr_queued; - -- qcfs_rq->h_nr_running -= task_delta; -- qcfs_rq->idle_h_nr_running -= idle_task_delta; -- qcfs_rq->h_nr_delayed -= delayed_delta; -+ qcfs_rq->h_nr_queued -= queued_delta; -+ qcfs_rq->h_nr_runnable -= runnable_delta; -+ qcfs_rq->h_nr_idle -= idle_delta; - - if (qcfs_rq->load.weight) { - /* Avoid re-evaluating load for this entity: */ -@@ -5912,18 +5927,18 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) - se_update_runnable(se); - - if (cfs_rq_is_idle(group_cfs_rq(se))) -- idle_task_delta = cfs_rq->h_nr_running; -+ idle_delta = cfs_rq->h_nr_queued; - -- qcfs_rq->h_nr_running -= task_delta; -- qcfs_rq->idle_h_nr_running -= idle_task_delta; -- qcfs_rq->h_nr_delayed -= delayed_delta; -+ qcfs_rq->h_nr_queued -= queued_delta; -+ qcfs_rq->h_nr_runnable -= runnable_delta; -+ qcfs_rq->h_nr_idle -= idle_delta; - } - - /* At this point se is NULL and we are at root level*/ -- sub_nr_running(rq, task_delta); -+ sub_nr_running(rq, queued_delta); - - /* Stop the fair server if throttling resulted in no runnable tasks */ -- if (rq_h_nr_running && !rq->cfs.h_nr_running) -+ if (rq_h_nr_queued && !rq->cfs.h_nr_queued) - dl_server_stop(&rq->fair_server); - done: - /* -@@ -5932,7 +5947,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) - */ - cfs_rq->throttled = 1; - SCHED_WARN_ON(cfs_rq->throttled_clock); -- if (cfs_rq->nr_running) -+ if (cfs_rq->nr_queued) - cfs_rq->throttled_clock = rq_clock(rq); - return true; - } -@@ -5942,8 +5957,8 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) - struct rq *rq = rq_of(cfs_rq); - struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); - 
struct sched_entity *se; -- long task_delta, idle_task_delta, delayed_delta; -- long rq_h_nr_running = rq->cfs.h_nr_running; -+ long queued_delta, runnable_delta, idle_delta; -+ long rq_h_nr_queued = rq->cfs.h_nr_queued; - - se = cfs_rq->tg->se[cpu_of(rq)]; - -@@ -5976,9 +5991,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) - goto unthrottle_throttle; - } - -- task_delta = cfs_rq->h_nr_running; -- idle_task_delta = cfs_rq->idle_h_nr_running; -- delayed_delta = cfs_rq->h_nr_delayed; -+ queued_delta = cfs_rq->h_nr_queued; -+ runnable_delta = cfs_rq->h_nr_runnable; -+ idle_delta = cfs_rq->h_nr_idle; - for_each_sched_entity(se) { - struct cfs_rq *qcfs_rq = cfs_rq_of(se); - -@@ -5992,11 +6007,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) - enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); - - if (cfs_rq_is_idle(group_cfs_rq(se))) -- idle_task_delta = cfs_rq->h_nr_running; -+ idle_delta = cfs_rq->h_nr_queued; - -- qcfs_rq->h_nr_running += task_delta; -- qcfs_rq->idle_h_nr_running += idle_task_delta; -- qcfs_rq->h_nr_delayed += delayed_delta; -+ qcfs_rq->h_nr_queued += queued_delta; -+ qcfs_rq->h_nr_runnable += runnable_delta; -+ qcfs_rq->h_nr_idle += idle_delta; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(qcfs_rq)) -@@ -6010,11 +6025,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) - se_update_runnable(se); - - if (cfs_rq_is_idle(group_cfs_rq(se))) -- idle_task_delta = cfs_rq->h_nr_running; -+ idle_delta = cfs_rq->h_nr_queued; - -- qcfs_rq->h_nr_running += task_delta; -- qcfs_rq->idle_h_nr_running += idle_task_delta; -- qcfs_rq->h_nr_delayed += delayed_delta; -+ qcfs_rq->h_nr_queued += queued_delta; -+ qcfs_rq->h_nr_runnable += runnable_delta; -+ qcfs_rq->h_nr_idle += idle_delta; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(qcfs_rq)) -@@ -6022,17 +6037,17 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) - } - - /* Start the fair server if un-throttling resulted in new runnable tasks */ -- if (!rq_h_nr_running && rq->cfs.h_nr_running) -+ if (!rq_h_nr_queued && rq->cfs.h_nr_queued) - dl_server_start(&rq->fair_server); - - /* At this point se is NULL and we are at root level*/ -- add_nr_running(rq, task_delta); -+ add_nr_running(rq, queued_delta); - - unthrottle_throttle: - assert_list_leaf_cfs_rq(rq); - - /* Determine whether we need to wake up potentially idle CPU: */ -- if (rq->curr == rq->idle && rq->cfs.nr_running) -+ if (rq->curr == rq->idle && rq->cfs.nr_queued) - resched_curr(rq); - } - -@@ -6333,7 +6348,7 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) - if (!cfs_bandwidth_used()) - return; - -- if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) -+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_queued) - return; - - __return_cfs_rq_runtime(cfs_rq); -@@ -6604,6 +6619,10 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) - - lockdep_assert_rq_held(rq); - -+ // Do not unthrottle for an active CPU -+ if (cpumask_test_cpu(cpu_of(rq), cpu_active_mask)) -+ return; -+ - /* - * The rq clock has already been updated in the - * set_rq_offline(), so we should skip updating -@@ -6618,19 +6637,21 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) - if (!cfs_rq->runtime_enabled) - continue; - -- /* -- * clock_task is not advancing so we just need to make sure -- * there's some valid quota amount -- */ -- cfs_rq->runtime_remaining = 1; - /* - * Offline rq is schedulable till CPU is completely disabled - * in take_cpu_down(), so we prevent new cfs throttling 
here. - */ - cfs_rq->runtime_enabled = 0; - -- if (cfs_rq_throttled(cfs_rq)) -- unthrottle_cfs_rq(cfs_rq); -+ if (!cfs_rq_throttled(cfs_rq)) -+ continue; -+ -+ /* -+ * clock_task is not advancing so we just need to make sure -+ * there's some valid quota amount -+ */ -+ cfs_rq->runtime_remaining = 1; -+ unthrottle_cfs_rq(cfs_rq); - } - rcu_read_unlock(); - -@@ -6679,11 +6700,6 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) - - #else /* CONFIG_CFS_BANDWIDTH */ - --static inline bool cfs_bandwidth_used(void) --{ -- return false; --} -- - static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} - static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } - static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} -@@ -6741,7 +6757,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) - - SCHED_WARN_ON(task_rq(p) != rq); - -- if (rq->cfs.h_nr_running > 1) { -+ if (rq->cfs.h_nr_queued > 1) { - u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; - u64 slice = se->slice; - s64 delta = slice - ran; -@@ -6829,7 +6845,7 @@ static inline void check_update_overutilized_status(struct rq *rq) { } - /* Runqueue only has SCHED_IDLE tasks enqueued */ - static int sched_idle_rq(struct rq *rq) - { -- return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && -+ return unlikely(rq->nr_running == rq->cfs.h_nr_idle && - rq->nr_running); - } - -@@ -6856,14 +6872,14 @@ requeue_delayed_entity(struct sched_entity *se) - if (sched_feat(DELAY_ZERO)) { - update_entity_lag(cfs_rq, se); - if (se->vlag > 0) { -- cfs_rq->nr_running--; -+ cfs_rq->nr_queued--; - if (se != cfs_rq->curr) - __dequeue_entity(cfs_rq, se); - se->vlag = 0; - place_entity(cfs_rq, se, 0); - if (se != cfs_rq->curr) - __enqueue_entity(cfs_rq, se); -- cfs_rq->nr_running++; -+ cfs_rq->nr_queued++; - } - } - -@@ -6881,10 +6897,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) - { - struct cfs_rq *cfs_rq; - struct sched_entity *se = &p->se; -- int idle_h_nr_running = task_has_idle_policy(p); -- int h_nr_delayed = 0; -+ int h_nr_idle = task_has_idle_policy(p); -+ int h_nr_runnable = 1; - int task_new = !(flags & ENQUEUE_WAKEUP); -- int rq_h_nr_running = rq->cfs.h_nr_running; -+ int rq_h_nr_queued = rq->cfs.h_nr_queued; - u64 slice = 0; - - /* -@@ -6909,8 +6925,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) - if (p->in_iowait) - cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); - -- if (task_new) -- h_nr_delayed = !!se->sched_delayed; -+ if (task_new && se->sched_delayed) -+ h_nr_runnable = 0; - - for_each_sched_entity(se) { - if (se->on_rq) { -@@ -6932,12 +6948,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) - enqueue_entity(cfs_rq, se, flags); - slice = cfs_rq_min_slice(cfs_rq); - -- cfs_rq->h_nr_running++; -- cfs_rq->idle_h_nr_running += idle_h_nr_running; -- cfs_rq->h_nr_delayed += h_nr_delayed; -+ cfs_rq->h_nr_runnable += h_nr_runnable; -+ cfs_rq->h_nr_queued++; -+ cfs_rq->h_nr_idle += h_nr_idle; - - if (cfs_rq_is_idle(cfs_rq)) -- idle_h_nr_running = 1; -+ h_nr_idle = 1; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) -@@ -6956,19 +6972,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) - se->slice = slice; - slice = cfs_rq_min_slice(cfs_rq); - -- cfs_rq->h_nr_running++; -- cfs_rq->idle_h_nr_running += idle_h_nr_running; -- cfs_rq->h_nr_delayed += h_nr_delayed; -+ cfs_rq->h_nr_runnable += h_nr_runnable; -+ 
cfs_rq->h_nr_queued++; -+ cfs_rq->h_nr_idle += h_nr_idle; - - if (cfs_rq_is_idle(cfs_rq)) -- idle_h_nr_running = 1; -+ h_nr_idle = 1; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) - goto enqueue_throttle; - } - -- if (!rq_h_nr_running && rq->cfs.h_nr_running) { -+ if (!rq_h_nr_queued && rq->cfs.h_nr_queued) { - /* Account for idle runtime */ - if (!rq->nr_running) - dl_server_update_idle_time(rq, rq->curr); -@@ -7015,22 +7031,22 @@ static void set_next_buddy(struct sched_entity *se); - static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) - { - bool was_sched_idle = sched_idle_rq(rq); -- int rq_h_nr_running = rq->cfs.h_nr_running; -+ int rq_h_nr_queued = rq->cfs.h_nr_queued; - bool task_sleep = flags & DEQUEUE_SLEEP; - bool task_delayed = flags & DEQUEUE_DELAYED; - struct task_struct *p = NULL; -- int idle_h_nr_running = 0; -- int h_nr_running = 0; -- int h_nr_delayed = 0; -+ int h_nr_idle = 0; -+ int h_nr_queued = 0; -+ int h_nr_runnable = 0; - struct cfs_rq *cfs_rq; - u64 slice = 0; - - if (entity_is_task(se)) { - p = task_of(se); -- h_nr_running = 1; -- idle_h_nr_running = task_has_idle_policy(p); -- if (!task_sleep && !task_delayed) -- h_nr_delayed = !!se->sched_delayed; -+ h_nr_queued = 1; -+ h_nr_idle = task_has_idle_policy(p); -+ if (task_sleep || task_delayed || !se->sched_delayed) -+ h_nr_runnable = 1; - } else { - cfs_rq = group_cfs_rq(se); - slice = cfs_rq_min_slice(cfs_rq); -@@ -7046,12 +7062,12 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) - break; - } - -- cfs_rq->h_nr_running -= h_nr_running; -- cfs_rq->idle_h_nr_running -= idle_h_nr_running; -- cfs_rq->h_nr_delayed -= h_nr_delayed; -+ cfs_rq->h_nr_runnable -= h_nr_runnable; -+ cfs_rq->h_nr_queued -= h_nr_queued; -+ cfs_rq->h_nr_idle -= h_nr_idle; - - if (cfs_rq_is_idle(cfs_rq)) -- idle_h_nr_running = h_nr_running; -+ h_nr_idle = h_nr_queued; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) -@@ -7085,21 +7101,21 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) - se->slice = slice; - slice = cfs_rq_min_slice(cfs_rq); - -- cfs_rq->h_nr_running -= h_nr_running; -- cfs_rq->idle_h_nr_running -= idle_h_nr_running; -- cfs_rq->h_nr_delayed -= h_nr_delayed; -+ cfs_rq->h_nr_runnable -= h_nr_runnable; -+ cfs_rq->h_nr_queued -= h_nr_queued; -+ cfs_rq->h_nr_idle -= h_nr_idle; - - if (cfs_rq_is_idle(cfs_rq)) -- idle_h_nr_running = h_nr_running; -+ h_nr_idle = h_nr_queued; - - /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) - return 0; - } - -- sub_nr_running(rq, h_nr_running); -+ sub_nr_running(rq, h_nr_queued); - -- if (rq_h_nr_running && !rq->cfs.h_nr_running) -+ if (rq_h_nr_queued && !rq->cfs.h_nr_queued) - dl_server_stop(&rq->fair_server); - - /* balance early to pull high priority tasks */ -@@ -8788,7 +8804,7 @@ static struct task_struct *pick_task_fair(struct rq *rq) - - again: - cfs_rq = &rq->cfs; -- if (!cfs_rq->nr_running) -+ if (!cfs_rq->nr_queued) - return NULL; - - do { -@@ -8905,7 +8921,7 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_stru - - static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) - { -- return !!dl_se->rq->cfs.nr_running; -+ return !!dl_se->rq->cfs.nr_queued; - } - - static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se) -@@ -9236,43 +9252,43 @@ static int task_hot(struct task_struct *p, struct lb_env *env) - - #ifdef 
CONFIG_NUMA_BALANCING - /* -- * Returns 1, if task migration degrades locality -- * Returns 0, if task migration improves locality i.e migration preferred. -- * Returns -1, if task migration is not affected by locality. -+ * Returns a positive value, if task migration degrades locality. -+ * Returns 0, if task migration is not affected by locality. -+ * Returns a negative value, if task migration improves locality i.e migration preferred. - */ --static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) -+static long migrate_degrades_locality(struct task_struct *p, struct lb_env *env) - { - struct numa_group *numa_group = rcu_dereference(p->numa_group); - unsigned long src_weight, dst_weight; - int src_nid, dst_nid, dist; - - if (!static_branch_likely(&sched_numa_balancing)) -- return -1; -+ return 0; - - if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) -- return -1; -+ return 0; - - src_nid = cpu_to_node(env->src_cpu); - dst_nid = cpu_to_node(env->dst_cpu); - - if (src_nid == dst_nid) -- return -1; -+ return 0; - - /* Migrating away from the preferred node is always bad. */ - if (src_nid == p->numa_preferred_nid) { - if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) - return 1; - else -- return -1; -+ return 0; - } - - /* Encourage migration to the preferred node. */ - if (dst_nid == p->numa_preferred_nid) -- return 0; -+ return -1; - - /* Leaving a core idle is often worse than degrading locality. */ - if (env->idle == CPU_IDLE) -- return -1; -+ return 0; - - dist = node_distance(src_nid, dst_nid); - if (numa_group) { -@@ -9283,37 +9299,77 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) - dst_weight = task_weight(p, dst_nid, dist); - } - -- return dst_weight < src_weight; -+ return src_weight - dst_weight; - } - - #else --static inline int migrate_degrades_locality(struct task_struct *p, -+static inline long migrate_degrades_locality(struct task_struct *p, - struct lb_env *env) - { -- return -1; -+ return 0; - } - #endif - -+/* -+ * Check whether the task is ineligible on the destination cpu -+ * -+ * When the PLACE_LAG scheduling feature is enabled and -+ * dst_cfs_rq->nr_queued is greater than 1, if the task -+ * is ineligible, it will also be ineligible when -+ * it is migrated to the destination cpu. -+ */ -+static inline int task_is_ineligible_on_dst_cpu(struct task_struct *p, int dest_cpu) -+{ -+ struct cfs_rq *dst_cfs_rq; -+ -+#ifdef CONFIG_FAIR_GROUP_SCHED -+ dst_cfs_rq = task_group(p)->cfs_rq[dest_cpu]; -+#else -+ dst_cfs_rq = &cpu_rq(dest_cpu)->cfs; -+#endif -+ if (sched_feat(PLACE_LAG) && dst_cfs_rq->nr_queued && -+ !entity_eligible(task_cfs_rq(p), &p->se)) -+ return 1; -+ -+ return 0; -+} -+ - /* - * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? - */ - static - int can_migrate_task(struct task_struct *p, struct lb_env *env) - { -- int tsk_cache_hot; -+ long degrades, hot; - - lockdep_assert_rq_held(env->src_rq); -+ if (p->sched_task_hot) -+ p->sched_task_hot = 0; - - /* - * We do not migrate tasks that are: -- * 1) throttled_lb_pair, or -- * 2) cannot be migrated to this CPU due to cpus_ptr, or -- * 3) running (obviously), or -- * 4) are cache-hot on their current CPU. -+ * 1) delayed dequeued unless we migrate load, or -+ * 2) throttled_lb_pair, or -+ * 3) cannot be migrated to this CPU due to cpus_ptr, or -+ * 4) running (obviously), or -+ * 5) are cache-hot on their current CPU. 
- */ -+ if ((p->se.sched_delayed) && (env->migration_type != migrate_load)) -+ return 0; -+ - if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) - return 0; - -+ /* -+ * We want to prioritize the migration of eligible tasks. -+ * For ineligible tasks we soft-limit them and only allow -+ * them to migrate when nr_balance_failed is non-zero to -+ * avoid load-balancing trying very hard to balance the load. -+ */ -+ if (!env->sd->nr_balance_failed && -+ task_is_ineligible_on_dst_cpu(p, env->dst_cpu)) -+ return 0; -+ - /* Disregard percpu kthreads; they are where they need to be. */ - if (kthread_is_per_cpu(p)) - return 0; -@@ -9369,16 +9425,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) - if (env->flags & LBF_ACTIVE_LB) - return 1; - -- tsk_cache_hot = migrate_degrades_locality(p, env); -- if (tsk_cache_hot == -1) -- tsk_cache_hot = task_hot(p, env); -+ degrades = migrate_degrades_locality(p, env); -+ if (!degrades) -+ hot = task_hot(p, env); -+ else -+ hot = degrades > 0; - -- if (tsk_cache_hot <= 0 || -- env->sd->nr_balance_failed > env->sd->cache_nice_tries) { -- if (tsk_cache_hot == 1) { -- schedstat_inc(env->sd->lb_hot_gained[env->idle]); -- schedstat_inc(p->stats.nr_forced_migrations); -- } -+ if (!hot || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { -+ if (hot) -+ p->sched_task_hot = 1; - return 1; - } - -@@ -9393,6 +9448,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env) - { - lockdep_assert_rq_held(env->src_rq); - -+ if (p->sched_task_hot) { -+ p->sched_task_hot = 0; -+ schedstat_inc(env->sd->lb_hot_gained[env->idle]); -+ schedstat_inc(p->stats.nr_forced_migrations); -+ } -+ - deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); - set_task_cpu(p, env->dst_cpu); - } -@@ -9553,6 +9614,9 @@ static int detach_tasks(struct lb_env *env) - - continue; - next: -+ if (p->sched_task_hot) -+ schedstat_inc(p->stats.nr_failed_migrations_hot); -+ - list_move(&p->se.group_node, tasks); - } - -@@ -9695,7 +9759,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done) - if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { - update_tg_load_avg(cfs_rq); - -- if (cfs_rq->nr_running == 0) -+ if (cfs_rq->nr_queued == 0) - update_idle_cfs_rq_clock_pelt(cfs_rq); - - if (cfs_rq == &rq->cfs) -@@ -10227,7 +10291,7 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd) - * When there is more than 1 task, the group_overloaded case already - * takes care of cpu with reduced capacity - */ -- if (rq->cfs.h_nr_running != 1) -+ if (rq->cfs.h_nr_runnable != 1) - return false; - - return check_cpu_capacity(rq, sd); -@@ -10262,7 +10327,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, - sgs->group_load += load; - sgs->group_util += cpu_util_cfs(i); - sgs->group_runnable += cpu_runnable(rq); -- sgs->sum_h_nr_running += rq->cfs.h_nr_running; -+ sgs->sum_h_nr_running += rq->cfs.h_nr_runnable; - - nr_running = rq->nr_running; - sgs->sum_nr_running += nr_running; -@@ -10577,7 +10646,7 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd, - sgs->group_util += cpu_util_without(i, p); - sgs->group_runnable += cpu_runnable_without(rq, p); - local = task_running_on_cpu(i, p); -- sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; -+ sgs->sum_h_nr_running += rq->cfs.h_nr_runnable - local; - - nr_running = rq->nr_running - local; - sgs->sum_nr_running += nr_running; -@@ -11359,7 +11428,7 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env, - if (rt > env->fbq_type) - continue; - -- 
nr_running = rq->cfs.h_nr_running; -+ nr_running = rq->cfs.h_nr_runnable; - if (!nr_running) - continue; - -@@ -11518,7 +11587,7 @@ static int need_active_balance(struct lb_env *env) - * available on dst_cpu. - */ - if (env->idle && -- (env->src_rq->cfs.h_nr_running == 1)) { -+ (env->src_rq->cfs.h_nr_runnable == 1)) { - if ((check_cpu_capacity(env->src_rq, sd)) && - (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) - return 1; -@@ -11598,6 +11667,28 @@ static int should_we_balance(struct lb_env *env) - return group_balance_cpu(sg) == env->dst_cpu; - } - -+static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd, -+ enum cpu_idle_type idle) -+{ -+ if (!schedstat_enabled()) -+ return; -+ -+ switch (env->migration_type) { -+ case migrate_load: -+ __schedstat_add(sd->lb_imbalance_load[idle], env->imbalance); -+ break; -+ case migrate_util: -+ __schedstat_add(sd->lb_imbalance_util[idle], env->imbalance); -+ break; -+ case migrate_task: -+ __schedstat_add(sd->lb_imbalance_task[idle], env->imbalance); -+ break; -+ case migrate_misfit: -+ __schedstat_add(sd->lb_imbalance_misfit[idle], env->imbalance); -+ break; -+ } -+} -+ - /* - * Check this_cpu to ensure it is balanced within domain. Attempt to move - * tasks if there is an imbalance. -@@ -11648,7 +11739,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq, - - WARN_ON_ONCE(busiest == env.dst_rq); - -- schedstat_add(sd->lb_imbalance[idle], env.imbalance); -+ update_lb_imbalance_stat(&env, sd, idle); - - env.src_cpu = busiest->cpu; - env.src_rq = busiest; -@@ -12146,16 +12237,13 @@ static inline int on_null_domain(struct rq *rq) - * - When one of the busy CPUs notices that there may be an idle rebalancing - * needed, they will kick the idle load balancer, which then does idle - * load balancing for all the idle CPUs. -- * -- * - HK_TYPE_MISC CPUs are used for this task, because HK_TYPE_SCHED is not set -- * anywhere yet. - */ - static inline int find_new_ilb(void) - { - const struct cpumask *hk_mask; - int ilb_cpu; - -- hk_mask = housekeeping_cpumask(HK_TYPE_MISC); -+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE); - - for_each_cpu_and(ilb_cpu, nohz.idle_cpus_mask, hk_mask) { - -@@ -12173,7 +12261,8 @@ static inline int find_new_ilb(void) - * Kick a CPU to do the NOHZ balancing, if it is time for it, via a cross-CPU - * SMP function call (IPI). - * -- * We pick the first idle CPU in the HK_TYPE_MISC housekeeping set (if there is one). -+ * We pick the first idle CPU in the HK_TYPE_KERNEL_NOISE housekeeping set -+ * (if there is one). 
- */ - static void kick_ilb(unsigned int flags) - { -@@ -12261,7 +12350,7 @@ static void nohz_balancer_kick(struct rq *rq) - * If there's a runnable CFS task and the current CPU has reduced - * capacity, kick the ILB to see if there's a better CPU to run on: - */ -- if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { -+ if (rq->cfs.h_nr_runnable >= 1 && check_cpu_capacity(rq, sd)) { - flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; - goto unlock; - } -@@ -12393,10 +12482,6 @@ void nohz_balance_enter_idle(int cpu) - if (!cpu_active(cpu)) - return; - -- /* Spare idle load balancing on CPUs that don't want to be disturbed: */ -- if (!housekeeping_cpu(cpu, HK_TYPE_SCHED)) -- return; -- - /* - * Can be set safely without rq->lock held - * If a clear happens, it will have evaluated last additions because -@@ -12616,13 +12701,6 @@ static void nohz_newidle_balance(struct rq *this_rq) - { - int this_cpu = this_rq->cpu; - -- /* -- * This CPU doesn't want to be disturbed by scheduler -- * housekeeping -- */ -- if (!housekeeping_cpu(this_cpu, HK_TYPE_SCHED)) -- return; -- - /* Will wake up very soon. No time for doing anything else*/ - if (this_rq->avg_idle < sysctl_sched_migration_cost) - return; -@@ -12759,11 +12837,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) - * have been enqueued in the meantime. Since we're not going idle, - * pretend we pulled a task. - */ -- if (this_rq->cfs.h_nr_running && !pulled_task) -+ if (this_rq->cfs.h_nr_queued && !pulled_task) - pulled_task = 1; - - /* Is there a task of a high priority class? */ -- if (this_rq->nr_running != this_rq->cfs.h_nr_running) -+ if (this_rq->nr_running != this_rq->cfs.h_nr_queued) - pulled_task = -1; - - out: -@@ -12784,9 +12862,9 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) - /* - * This softirq handler is triggered via SCHED_SOFTIRQ from two places: - * -- * - directly from the local scheduler_tick() for periodic load balancing -+ * - directly from the local sched_tick() for periodic load balancing - * -- * - indirectly from a remote scheduler_tick() for NOHZ idle balancing -+ * - indirectly from a remote sched_tick() for NOHZ idle balancing - * through the SMP cross-call nohz_csd_func() - */ - static __latent_entropy void sched_balance_softirq(void) -@@ -12877,7 +12955,7 @@ static inline void task_tick_core(struct rq *rq, struct task_struct *curr) - * MIN_NR_TASKS_DURING_FORCEIDLE - 1 tasks and use that to check - * if we need to give up the CPU. 
- */ -- if (rq->core->core_forceidle_count && rq->cfs.nr_running == 1 && -+ if (rq->core->core_forceidle_count && rq->cfs.nr_queued == 1 && - __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE)) - resched_curr(rq); - } -@@ -13021,7 +13099,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) - if (!task_on_rq_queued(p)) - return; - -- if (rq->cfs.nr_running == 1) -+ if (rq->cfs.nr_queued == 1) - return; - - /* -@@ -13431,7 +13509,7 @@ int sched_group_set_idle(struct task_group *tg, long idle) - for_each_possible_cpu(i) { - struct rq *rq = cpu_rq(i); - struct sched_entity *se = tg->se[i]; -- struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i]; -+ struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i]; - bool was_idle = cfs_rq_is_idle(grp_cfs_rq); - long idle_task_delta; - struct rq_flags rf; -@@ -13442,16 +13520,8 @@ int sched_group_set_idle(struct task_group *tg, long idle) - if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq))) - goto next_cpu; - -- if (se->on_rq) { -- parent_cfs_rq = cfs_rq_of(se); -- if (cfs_rq_is_idle(grp_cfs_rq)) -- parent_cfs_rq->idle_nr_running++; -- else -- parent_cfs_rq->idle_nr_running--; -- } -- -- idle_task_delta = grp_cfs_rq->h_nr_running - -- grp_cfs_rq->idle_h_nr_running; -+ idle_task_delta = grp_cfs_rq->h_nr_queued - -+ grp_cfs_rq->h_nr_idle; - if (!cfs_rq_is_idle(grp_cfs_rq)) - idle_task_delta *= -1; - -@@ -13461,7 +13531,7 @@ int sched_group_set_idle(struct task_group *tg, long idle) - if (!se->on_rq) - break; - -- cfs_rq->idle_h_nr_running += idle_task_delta; -+ cfs_rq->h_nr_idle += idle_task_delta; - - /* Already accounted at parent level and above. */ - if (cfs_rq_is_idle(cfs_rq)) -diff --git a/kernel/sched/features.h b/kernel/sched/features.h -index a3d331dd2d8f..3c12d9f93331 100644 ---- a/kernel/sched/features.h -+++ b/kernel/sched/features.h -@@ -31,6 +31,15 @@ SCHED_FEAT(PREEMPT_SHORT, true) - */ - SCHED_FEAT(NEXT_BUDDY, false) - -+/* -+ * Allow completely ignoring cfs_rq->next; which can be set from various -+ * places: -+ * - NEXT_BUDDY (wakeup preemption) -+ * - yield_to_task() -+ * - cgroup dequeue / pick -+ */ -+SCHED_FEAT(PICK_BUDDY, true) -+ - /* - * Consider buddies to be cache hot, decreases the likeliness of a - * cache buddy being migrated away, increases cache locality. 
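(Editorial aside, not part of the patch text: the PICK_BUDDY feature introduced in the features.h hunk above is consumed on the pick path, where it gates whether cfs_rq->next is honoured at all. Upstream around this series the consumer looks roughly like the sketch below; the exact guard conditions may differ in the tree this patch tracks.)

	static struct sched_entity *
	pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq)
	{
		/*
		 * Picking the ->next buddy trades a little fairness for
		 * latency, so only honour it while PICK_BUDDY is enabled
		 * and the buddy is still eligible under EEVDF.
		 */
		if (sched_feat(PICK_BUDDY) &&
		    cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next))
			return cfs_rq->next;

		return pick_eevdf(cfs_rq);
	}

(On kernels that expose /sys/kernel/debug/sched/features, features declared in this file can be flipped at runtime, which makes a toggle like PICK_BUDDY easy to A/B test.)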
-diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c -index 5891e715f00d..81bc8b329ef1 100644 ---- a/kernel/sched/isolation.c -+++ b/kernel/sched/isolation.c -@@ -9,15 +9,9 @@ - */ - - enum hk_flags { -- HK_FLAG_TIMER = BIT(HK_TYPE_TIMER), -- HK_FLAG_RCU = BIT(HK_TYPE_RCU), -- HK_FLAG_MISC = BIT(HK_TYPE_MISC), -- HK_FLAG_SCHED = BIT(HK_TYPE_SCHED), -- HK_FLAG_TICK = BIT(HK_TYPE_TICK), - HK_FLAG_DOMAIN = BIT(HK_TYPE_DOMAIN), -- HK_FLAG_WQ = BIT(HK_TYPE_WQ), - HK_FLAG_MANAGED_IRQ = BIT(HK_TYPE_MANAGED_IRQ), -- HK_FLAG_KTHREAD = BIT(HK_TYPE_KTHREAD), -+ HK_FLAG_KERNEL_NOISE = BIT(HK_TYPE_KERNEL_NOISE), - }; - - DEFINE_STATIC_KEY_FALSE(housekeeping_overridden); -@@ -97,7 +91,7 @@ void __init housekeeping_init(void) - - static_branch_enable(&housekeeping_overridden); - -- if (housekeeping.flags & HK_FLAG_TICK) -+ if (housekeeping.flags & HK_FLAG_KERNEL_NOISE) - sched_tick_offload_init(); - - for_each_set_bit(type, &housekeeping.flags, HK_TYPE_MAX) { -@@ -121,7 +115,7 @@ static int __init housekeeping_setup(char *str, unsigned long flags) - unsigned int first_cpu; - int err = 0; - -- if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) { -+ if ((flags & HK_FLAG_KERNEL_NOISE) && !(housekeeping.flags & HK_FLAG_KERNEL_NOISE)) { - if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) { - pr_warn("Housekeeping: nohz unsupported." - " Build with CONFIG_NO_HZ_FULL\n"); -@@ -177,7 +171,7 @@ static int __init housekeeping_setup(char *str, unsigned long flags) - housekeeping_setup_type(type, housekeeping_staging); - } - -- if ((flags & HK_FLAG_TICK) && !(housekeeping.flags & HK_FLAG_TICK)) -+ if ((flags & HK_FLAG_KERNEL_NOISE) && !(housekeeping.flags & HK_FLAG_KERNEL_NOISE)) - tick_nohz_full_setup(non_housekeeping_mask); - - housekeeping.flags |= flags; -@@ -195,8 +189,7 @@ static int __init housekeeping_nohz_full_setup(char *str) - { - unsigned long flags; - -- flags = HK_FLAG_TICK | HK_FLAG_WQ | HK_FLAG_TIMER | HK_FLAG_RCU | -- HK_FLAG_MISC | HK_FLAG_KTHREAD; -+ flags = HK_FLAG_KERNEL_NOISE; - - return housekeeping_setup(str, flags); - } -@@ -210,9 +203,12 @@ static int __init housekeeping_isolcpus_setup(char *str) - int len; - - while (isalpha(*str)) { -+ /* -+ * isolcpus=nohz is equivalent to nohz_full. 
-+ */ - if (!strncmp(str, "nohz,", 5)) { - str += 5; -- flags |= HK_FLAG_TICK; -+ flags |= HK_FLAG_KERNEL_NOISE; - continue; - } - -diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c -index fee75cc2c47b..7a8534a2deff 100644 ---- a/kernel/sched/pelt.c -+++ b/kernel/sched/pelt.c -@@ -275,7 +275,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) - * - * group: [ see update_cfs_group() ] - * se_weight() = tg->weight * grq->load_avg / tg->load_avg -- * se_runnable() = grq->h_nr_running -+ * se_runnable() = grq->h_nr_runnable - * - * runnable_sum = se_runnable() * runnable = grq->runnable_sum - * runnable_avg = runnable_sum -@@ -321,7 +321,7 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq) - { - if (___update_load_sum(now, &cfs_rq->avg, - scale_load_down(cfs_rq->load.weight), -- cfs_rq->h_nr_running - cfs_rq->h_nr_delayed, -+ cfs_rq->h_nr_runnable, - cfs_rq->curr != NULL)) { - - ___update_load_avg(&cfs_rq->avg, 1); -diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c -index 84dad1511d1e..bb56805e3d47 100644 ---- a/kernel/sched/psi.c -+++ b/kernel/sched/psi.c -@@ -998,7 +998,7 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st - s64 delta; - u64 irq; - -- if (static_branch_likely(&psi_disabled)) -+ if (static_branch_likely(&psi_disabled) || !irqtime_enabled()) - return; - - if (!curr->pid) -@@ -1240,6 +1240,11 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res) - if (static_branch_likely(&psi_disabled)) - return -EOPNOTSUPP; - -+#ifdef CONFIG_IRQ_TIME_ACCOUNTING -+ if (!irqtime_enabled() && res == PSI_IRQ) -+ return -EOPNOTSUPP; -+#endif -+ - /* Update averages before reporting them */ - mutex_lock(&group->avgs_lock); - now = sched_clock(); -diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index c5d67a43fe52..c7cf4cc57cdd 100644 ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -362,7 +362,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); - extern bool __checkparam_dl(const struct sched_attr *attr); - extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); - extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); --extern int dl_bw_check_overflow(int cpu); -+extern int dl_bw_deactivate(int cpu); - extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec); - /* - * SCHED_DEADLINE supports servers (nested scheduling) with the following -@@ -650,11 +650,10 @@ struct balance_callback { - /* CFS-related fields in a runqueue */ - struct cfs_rq { - struct load_weight load; -- unsigned int nr_running; -- unsigned int h_nr_running; /* SCHED_{NORMAL,BATCH,IDLE} */ -- unsigned int idle_nr_running; /* SCHED_IDLE */ -- unsigned int idle_h_nr_running; /* SCHED_IDLE */ -- unsigned int h_nr_delayed; -+ unsigned int nr_queued; -+ unsigned int h_nr_queued; /* SCHED_{NORMAL,BATCH,IDLE} */ -+ unsigned int h_nr_runnable; /* SCHED_{NORMAL,BATCH,IDLE} */ -+ unsigned int h_nr_idle; /* SCHED_IDLE */ - - s64 avg_vruntime; - u64 avg_load; -@@ -904,11 +903,8 @@ struct dl_rq { - - static inline void se_update_runnable(struct sched_entity *se) - { -- if (!entity_is_task(se)) { -- struct cfs_rq *cfs_rq = se->my_q; -- -- se->runnable_weight = cfs_rq->h_nr_running - cfs_rq->h_nr_delayed; -- } -+ if (!entity_is_task(se)) -+ se->runnable_weight = se->my_q->h_nr_runnable; - } - - static inline long se_runnable(struct sched_entity *se) -@@ -2280,7 +2276,7 @@ static inline int 
task_on_cpu(struct rq *rq, struct task_struct *p) - - static inline int task_on_rq_queued(struct task_struct *p) - { -- return p->on_rq == TASK_ON_RQ_QUEUED; -+ return READ_ONCE(p->on_rq) == TASK_ON_RQ_QUEUED; - } - - static inline int task_on_rq_migrating(struct task_struct *p) -@@ -2574,7 +2570,7 @@ static inline bool sched_rt_runnable(struct rq *rq) - - static inline bool sched_fair_runnable(struct rq *rq) - { -- return rq->cfs.nr_running > 0; -+ return rq->cfs.nr_queued > 0; - } - - extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); -@@ -3242,6 +3238,12 @@ struct irqtime { - }; - - DECLARE_PER_CPU(struct irqtime, cpu_irqtime); -+DECLARE_STATIC_KEY_FALSE(sched_clock_irqtime); -+ -+static inline int irqtime_enabled(void) -+{ -+ return static_branch_likely(&sched_clock_irqtime); -+} - - /* - * Returns the irqtime minus the softirq time computed by ksoftirqd. -@@ -3262,6 +3264,13 @@ static inline u64 irq_time_read(int cpu) - return total; - } - -+#else -+ -+static inline int irqtime_enabled(void) -+{ -+ return 0; -+} -+ - #endif /* CONFIG_IRQ_TIME_ACCOUNTING */ - - #ifdef CONFIG_CPU_FREQ -@@ -3509,6 +3518,8 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned - - #endif /* !CONFIG_HAVE_SCHED_AVG_IRQ */ - -+extern void __setparam_fair(struct task_struct *p, const struct sched_attr *attr); -+ - #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) - - #define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus))) -diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c -index eb0cdcd4d921..4346fd81c31f 100644 ---- a/kernel/sched/stats.c -+++ b/kernel/sched/stats.c -@@ -103,7 +103,7 @@ void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, - * Bump this up when changing the output format or the meaning of an existing - * format, so that tools can adapt (or abort) - */ --#define SCHEDSTAT_VERSION 16 -+#define SCHEDSTAT_VERSION 17 - - static int show_schedstat(struct seq_file *seq, void *v) - { -@@ -138,14 +138,17 @@ static int show_schedstat(struct seq_file *seq, void *v) - for_each_domain(cpu, sd) { - enum cpu_idle_type itype; - -- seq_printf(seq, "domain%d %*pb", dcount++, -+ seq_printf(seq, "domain%d %s %*pb", dcount++, sd->name, - cpumask_pr_args(sched_domain_span(sd))); - for (itype = 0; itype < CPU_MAX_IDLE_TYPES; itype++) { -- seq_printf(seq, " %u %u %u %u %u %u %u %u", -+ seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u", - sd->lb_count[itype], - sd->lb_balanced[itype], - sd->lb_failed[itype], -- sd->lb_imbalance[itype], -+ sd->lb_imbalance_load[itype], -+ sd->lb_imbalance_util[itype], -+ sd->lb_imbalance_task[itype], -+ sd->lb_imbalance_misfit[itype], - sd->lb_gained[itype], - sd->lb_hot_gained[itype], - sd->lb_nobusyq[itype], -diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h -index 8ee0add5a48a..6ade91bce63e 100644 ---- a/kernel/sched/stats.h -+++ b/kernel/sched/stats.h -@@ -138,6 +138,10 @@ static inline void psi_enqueue(struct task_struct *p, int flags) - if (flags & ENQUEUE_RESTORE) - return; - -+ /* psi_sched_switch() will handle the flags */ -+ if (task_on_cpu(task_rq(p), p)) -+ return; -+ - if (p->se.sched_delayed) { - /* CPU migration of "sleeping" task */ - SCHED_WARN_ON(!(flags & ENQUEUE_MIGRATED)); -diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c -index ff0e5ab4e37c..149e2c8036d3 100644 ---- a/kernel/sched/syscalls.c -+++ b/kernel/sched/syscalls.c -@@ -300,20 +300,10 @@ static void __setscheduler_params(struct 
task_struct *p, - - p->policy = policy; - -- if (dl_policy(policy)) { -+ if (dl_policy(policy)) - __setparam_dl(p, attr); -- } else if (fair_policy(policy)) { -- p->static_prio = NICE_TO_PRIO(attr->sched_nice); -- if (attr->sched_runtime) { -- p->se.custom_slice = 1; -- p->se.slice = clamp_t(u64, attr->sched_runtime, -- NSEC_PER_MSEC/10, /* HZ=1000 * 10 */ -- NSEC_PER_MSEC*100); /* HZ=100 / 10 */ -- } else { -- p->se.custom_slice = 0; -- p->se.slice = sysctl_sched_base_slice; -- } -- } -+ else if (fair_policy(policy)) -+ __setparam_fair(p, attr); - - /* rt-policy tasks do not have a timerslack */ - if (rt_or_dl_task_policy(p)) { -@@ -1433,7 +1423,7 @@ int __sched yield_to(struct task_struct *p, bool preempt) - struct rq *rq, *p_rq; - int yielded = 0; - -- scoped_guard (irqsave) { -+ scoped_guard (raw_spinlock_irqsave, &p->pi_lock) { - rq = this_rq(); - - again: -diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c -index 9748a4c8d668..da33ec9e94ab 100644 ---- a/kernel/sched/topology.c -+++ b/kernel/sched/topology.c -@@ -1635,9 +1635,7 @@ sd_init(struct sched_domain_topology_level *tl, - .max_newidle_lb_cost = 0, - .last_decay_max_lb_cost = jiffies, - .child = child, --#ifdef CONFIG_SCHED_DEBUG - .name = tl->name, --#endif - }; - - sd_span = sched_domain_span(sd); -@@ -2338,10 +2336,8 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve - if (!cpumask_subset(sched_domain_span(child), - sched_domain_span(sd))) { - pr_err("BUG: arch topology borken\n"); --#ifdef CONFIG_SCHED_DEBUG - pr_err(" the %s domain not a subset of the %s domain\n", - child->name, sd->name); --#endif - /* Fixup, ensure @sd has at least @child CPUs. */ - cpumask_or(sched_domain_span(sd), - sched_domain_span(sd), -@@ -2721,9 +2717,11 @@ void partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[], - - /* - * This domain won't be destroyed and as such -- * its dl_bw->total_bw needs to be cleared. It -- * will be recomputed in function -- * update_tasks_root_domain(). -+ * its dl_bw->total_bw needs to be cleared. -+ * Tasks contribution will be then recomputed -+ * in function dl_update_tasks_root_domain(), -+ * dl_servers contribution in function -+ * dl_restore_server_root_domain(). - */ - rd = cpu_rq(cpumask_any(doms_cur[i]))->rd; - dl_clear_root_domain(rd); diff --git a/patches/linux/6.13/tlb-flush-optimization.patch b/patches/linux/6.13/tlb-flush-optimization.patch deleted file mode 100644 index f626aa5..0000000 --- a/patches/linux/6.13/tlb-flush-optimization.patch +++ /dev/null @@ -1,139 +0,0 @@ -diff --git a/mm/vmscan.c b/mm/vmscan.c -index bd489c1af..1bd510622 100644 ---- a/mm/vmscan.c -+++ b/mm/vmscan.c -@@ -1035,6 +1035,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, - struct folio_batch free_folios; - LIST_HEAD(ret_folios); - LIST_HEAD(demote_folios); -+ LIST_HEAD(pageout_list); - unsigned int nr_reclaimed = 0; - unsigned int pgactivate = 0; - bool do_demote_pass; -@@ -1351,46 +1352,3 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, -- /* -- * Folio is dirty. Flush the TLB if a writable entry -- * potentially exists to avoid CPU writes after I/O -- * starts and then write it out here. -- */ -- try_to_unmap_flush_dirty(); -- switch (pageout(folio, mapping, &plug, folio_list)) { -- case PAGE_KEEP: -- goto keep_locked; -- case PAGE_ACTIVATE: -- /* -- * If shmem folio is split when writeback to swap, -- * the tail pages will make their own pass through -- * this function and be accounted then. 
-- */ -- if (nr_pages > 1 && !folio_test_large(folio)) { -- sc->nr_scanned -= (nr_pages - 1); -- nr_pages = 1; -- } -- goto activate_locked; -- case PAGE_SUCCESS: -- if (nr_pages > 1 && !folio_test_large(folio)) { -- sc->nr_scanned -= (nr_pages - 1); -- nr_pages = 1; -- } -- stat->nr_pageout += nr_pages; -- -- if (folio_test_writeback(folio)) -- goto keep; -- if (folio_test_dirty(folio)) -- goto keep; -- -- /* -- * A synchronous write - probably a ramdisk. Go -- * ahead and try to reclaim the folio. -- */ -- if (!folio_trylock(folio)) -- goto keep; -- if (folio_test_dirty(folio) || -- folio_test_writeback(folio)) -- goto keep_locked; -- mapping = folio_mapping(folio); -- fallthrough; -- case PAGE_CLEAN: -- ; /* try to free the folio below */ -- } -+ /* Add to pageout list for defered bio submissions */ -+ list_add(&folio->lru, &pageout_list); -+ continue; -@@ -1494,6 +1465,76 @@ static unsigned int shrink_folio_list(struct list_head *folio_list, - } - /* 'folio_list' is always empty here */ - -+ if (!list_empty(&pageout_list)) { -+ /* -+ * Batch TLB flushes by flushing once before processing all dirty pages. -+ * Since we operate on one PMD at a time, this batches TLB flushes at -+ * PMD granularity rather than per-page, reducing IPIs. -+ */ -+ struct address_space *mapping; -+ try_to_unmap_flush_dirty(); -+ -+ while (!list_empty(&pageout_list)) { -+ struct folio *folio = lru_to_folio(&pageout_list); -+ list_del(&folio->lru); -+ -+ /* Recheck if page got reactivated */ -+ if (folio_test_active(folio) || -+ (folio_mapped(folio) && folio_test_young(folio))) -+ goto skip_pageout_locked; -+ -+ mapping = folio_mapping(folio); -+ pageout_t pageout_res = pageout(folio, mapping, &plug, folio_list); -+ switch (pageout_res) { -+ case PAGE_KEEP: -+ goto skip_pageout_locked; -+ case PAGE_ACTIVATE: -+ goto skip_pageout_locked; -+ case PAGE_SUCCESS: -+ stat->nr_pageout += folio_nr_pages(folio); -+ -+ if (folio_test_writeback(folio) || -+ folio_test_dirty(folio)) -+ goto skip_pageout; -+ -+ /* -+ * A synchronous write - probably a ramdisk. Go -+ * ahead and try to reclaim the folio. 
-+ */ -+ if (!folio_trylock(folio)) -+ goto skip_pageout; -+ if (folio_test_dirty(folio) || -+ folio_test_writeback(folio)) -+ goto skip_pageout_locked; -+ -+ // Try to free the page -+ if (!mapping || -+ !__remove_mapping(mapping, folio, true, -+ sc->target_mem_cgroup)) -+ goto skip_pageout_locked; -+ -+ nr_reclaimed += folio_nr_pages(folio); -+ folio_unlock(folio); -+ continue; -+ -+ case PAGE_CLEAN: -+ if (!mapping || -+ !__remove_mapping(mapping, folio, true, -+ sc->target_mem_cgroup)) -+ goto skip_pageout_locked; -+ -+ nr_reclaimed += folio_nr_pages(folio); -+ folio_unlock(folio); -+ continue; -+ } -+ -+skip_pageout_locked: -+ folio_unlock(folio); -+skip_pageout: -+ list_add(&folio->lru, &ret_folios); -+ } -+ } -+ - /* Migrate folios selected for demotion */ - nr_reclaimed += demote_folio_list(&demote_folios, pgdat); - /* Folios that could not be demoted are still in @demote_folios */ diff --git a/patches/nvidia/6.14/comment-out-date.patch b/patches/nvidia/6.14/comment-out-date.patch deleted file mode 100644 index d995a90..0000000 --- a/patches/nvidia/6.14/comment-out-date.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/kernel/nvidia-drm/nvidia-drm-drv.c b/kernel/nvidia-drm/nvidia-drm-drv.c -index 8f905f82..c88fca3e 100644 ---- a/kernel/nvidia-drm/nvidia-drm-drv.c -+++ b/kernel/nvidia-drm/nvidia-drm-drv.c -@@ -1904,7 +1904,7 @@ static struct drm_driver nv_drm_driver = { - .name = "nvidia-drm", - - .desc = "NVIDIA DRM driver", -- .date = "20160202", -+ //.date = "20160202", - - #if defined(NV_DRM_DRIVER_HAS_DEVICE_LIST) - .device_list = LIST_HEAD_INIT(nv_drm_driver.device_list),
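(Two editorial notes on the scheduler hunks above, kept outside the patch text so the hunks stay verbatim. First, the migrate_degrades_locality() rework near the top of this section: the return type widens from a tristate int (1 / 0 / -1) to a signed long whose sign carries the verdict and whose magnitude comes from the NUMA fault weights, since the function now returns src_weight - dst_weight. A worked example with made-up weights:)

	/*
	 * Illustration only -- the weights are invented.  With
	 * src_weight = 700 and dst_weight = 300,
	 * migrate_degrades_locality() now returns 700 - 300 = 400,
	 * and can_migrate_task() reacts as:
	 *
	 *	degrades = migrate_degrades_locality(p, env);	// 400
	 *	if (!degrades)
	 *		hot = task_hot(p, env);	// NUMA neutral: ask the cache
	 *	else
	 *		hot = degrades > 0;	// 1 here: migration hurts locality
	 *
	 * A negative return (dst_weight > src_weight) means the move
	 * improves locality, so hot = 0 and the task may migrate freely;
	 * a zero return defers to task_hot(), replacing the old magic -1.
	 */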
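(Second, the SCHEDSTAT_VERSION 16 -> 17 bump: each /proc/schedstat domain line gains the domain name after "domainN" and splits the single lb_imbalance counter into four per-migration-type counters -- load, util, task, misfit -- matching the new update_lb_imbalance_stat() helper. Below is a hypothetical stand-alone reader for the first idle-type block of such a line; the field positions follow the seq_printf format in the hunk, but the program itself is an assumption, not something shipped with the patch.)

	#include <stdio.h>

	int main(void)
	{
		char line[4096], name[64], mask[128];
		unsigned int d, cnt, balanced, failed;
		unsigned int imb_load, imb_util, imb_task, imb_misfit;
		FILE *f = fopen("/proc/schedstat", "r");

		if (!f)
			return 1;

		while (fgets(line, sizeof(line), f)) {
			/*
			 * v17 domain line: "domainN <name> <cpumask>"
			 * followed by 11 counters per idle type; within each
			 * block the fields run lb_count, lb_balanced,
			 * lb_failed, then the four new imbalance counters.
			 */
			if (sscanf(line, "domain%u %63s %127s %u %u %u %u %u %u %u",
				   &d, name, mask, &cnt, &balanced, &failed,
				   &imb_load, &imb_util, &imb_task, &imb_misfit) == 10)
				printf("domain%u %s: imbalance load=%u util=%u task=%u misfit=%u\n",
				       d, name, imb_load, imb_util, imb_task, imb_misfit);
		}
		fclose(f);
		return 0;
	}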