platform-drivers-x86 for v6.8-2

Highlights:
  -  WMI bus driver fixes
  -  Second attempt (previously reverted) at P2SB PCI rescan deadlock fix
  -  AMD PMF driver improvements
  -  MAINTAINERS updates
  -  Misc. other small fixes and hw-id additions
 
 The following is an automated git shortlog grouped by driver:
 
 MAINTAINERS:
  -  remove defunct acpi4asus project info from asus notebooks section
  -  add Luke Jones as maintainer for asus notebooks
  -  Remove Perry Yuan as DELL WMI HARDWARE PRIVACY SUPPORT maintainer
 
 intel-uncore-freq:
  -  Fix types in sysfs callbacks
 
 intel-wmi-sbl-fw-update:
  -  Fix function name in error message
 
 p2sb:
  -  Use pci_resource_n() in p2sb_read_bar0()
  -  Allow p2sb_bar() calls during PCI device probe
 
 platform/mellanox:
  -  mlxbf-pmc: Fix offset calculation for crspace events
  -  mlxbf-tmfifo: Drop Tx network packet when Tx TmFIFO is full
 
 platform/x86/amd/pmf:
  -  Fix memory leak in amd_pmf_get_pb_data()
  -  Get ambient light information from AMD SFH driver
  -  Get Human presence information from AMD SFH driver
 
 platform/x86/intel/ifs:
  -  Call release_firmware() when handling errors.
 
 silicom-platform:
  -  Add missing "Description:" for power_cycle sysfs attr
 
 touchscreen_dmi:
  -  Add info for the TECLAST X16 Plus tablet
 
 wmi:
  -  Fix wmi_dev_probe()
  -  Fix notify callback locking
  -  Decouple legacy WMI notify handlers from wmi_block_list
 -  Return immediately if a suitable WMI event is found
  -  Fix error handling in legacy WMI notify handler functions
 -----BEGIN PGP SIGNATURE-----
 
 iQFIBAABCAAyFiEEuvA7XScYQRpenhd+kuxHeUQDJ9wFAmW1JScUHGhkZWdvZWRl
 QHJlZGhhdC5jb20ACgkQkuxHeUQDJ9xNVwf/YXSuNEw+ztLH0pEySBUATHrcIbO7
 gOpW2ZISf6IzRe7HFw7Ea1IJxrvysPn8VEribT3Sot9Ka+Pzd6H/TVA64sfyE7oG
 wEke2Uxpnfie65Yo2IYNADhfLTOyAL7mvchScQz5hTE+gBq5Fdac2ykK+ox1dpTs
 BqPg1/yG06L1SRX2Id0UNNYGMBsmjUH6v2b+M8Rcba+qcdznGMRe7l8T1Q2fY+nl
 P6+tz3rYdfrGn1j+35Wo2bgKaB8l6rrtOscIvpke+CxZ95+6UxqZfLOBCg8u/njA
 QbWqfZGjmbRGrbNo4C3fAHjj6SzQNyNfsm4gd4eJzl8X1CR9gzM8kb/xmg==
 =yoe7
 -----END PGP SIGNATURE-----
gpgsig -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQSCSUwRdwTNL2MhaBlZrE9hU+XOMQUCZcTBOwAKCRBZrE9hU+XO
 MWqxAQCI45uMAAJyya3FEVUaUNO1fgC1q8gIG9KoRMACys+HnAEAzR9JNjQOJ8g2
 ccMpCtfVJxxlBJw3U8L6p+2FWkrt5gU=
 =5exp
 -----END PGP SIGNATURE-----

Merge tag 'platform-drivers-x86-v6.8-2' into pdx86/for-next

Merge the 'platform-drivers-x86-v6.8-2' fixes tag into pdx86/for-next
because of the WMI fixes. The WMI changes already in for-next created
a minor conflict with these fixes, and WMI is being actively improved,
so besides resolving the current conflict this merge also avoids
further conflicts.
Ilpo Järvinen 2024-02-08 13:49:31 +02:00
commit d16c9a3d97
14 changed files with 472 additions and 189 deletions


@@ -10,6 +10,7 @@ What: /sys/devices/platform/silicom-platform/power_cycle
 Date:		November 2023
 KernelVersion:	6.7
 Contact:	Henry Shi <henrys@silicom-usa.com>
+Description:
 		This file allow user to power cycle the platform.
 		Default value is 0; when set to 1, it powers down
 		the platform, waits 5 seconds, then powers on the


@@ -3168,10 +3168,10 @@ F: drivers/hwmon/asus-ec-sensors.c
 ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
 M:	Corentin Chary <corentin.chary@gmail.com>
-L:	acpi4asus-user@lists.sourceforge.net
+M:	Luke D. Jones <luke@ljones.dev>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
-W:	http://acpi4asus.sf.net
+W:	https://asus-linux.org/
 F:	drivers/platform/x86/asus*.c
 F:	drivers/platform/x86/eeepc*.c
@@ -5958,7 +5958,6 @@ S: Maintained
 F:	drivers/platform/x86/dell/dell-wmi-descriptor.c
 
 DELL WMI HARDWARE PRIVACY SUPPORT
-M:	Perry Yuan <Perry.Yuan@dell.com>
 L:	Dell.Client.Kernel@dell.com
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained


@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
 	int ret;
 
 	addr = pmc->block[blk_num].mmio_base +
-		(rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+		((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
 	ret = mlxbf_pmc_readl(addr, &word);
 	if (ret)
 		return ret;
@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
 	int ret;
 
 	addr = pmc->block[blk_num].mmio_base +
-		(rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+		((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
 	ret = mlxbf_pmc_readl(addr, &word);
 	if (ret)
 		return ret;


@@ -47,6 +47,9 @@
 /* Message with data needs at least two words (for header & data). */
 #define MLXBF_TMFIFO_DATA_MIN_WORDS 2
 
+/* Tx timeout in milliseconds. */
+#define TMFIFO_TX_TIMEOUT 2000
+
 /* ACPI UID for BlueField-3. */
 #define TMFIFO_BF3_UID 1
@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
  * @drop_desc: dummy desc for packet dropping
  * @cur_len: processed length of the current descriptor
  * @rem_len: remaining length of the pending packet
+ * @rem_padding: remaining bytes to send as paddings
  * @pkt_len: total length of the pending packet
  * @next_avail: next avail descriptor id
  * @num: vring size (number of descriptors)
  * @align: vring alignment size
  * @index: vring index
  * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @tx_timeout: expire time of last tx packet
  * @fifo: pointer to the tmfifo structure
  */
 struct mlxbf_tmfifo_vring {
@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
 	struct vring_desc drop_desc;
 	int cur_len;
 	int rem_len;
+	int rem_padding;
 	u32 pkt_len;
 	u16 next_avail;
 	int num;
 	int align;
 	int index;
 	int vdev_id;
+	unsigned long tx_timeout;
 	struct mlxbf_tmfifo *fifo;
 };
@@ -819,6 +826,50 @@ mlxbf_tmfifo_desc_done:
 	return true;
 }
 
+static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
+{
+	unsigned long flags;
+
+	/* Only handle Tx timeout for network vdev. */
+	if (vring->vdev_id != VIRTIO_ID_NET)
+		return;
+
+	/* Initialize the timeout or return if not expired. */
+	if (!vring->tx_timeout) {
+		/* Initialize the timeout. */
+		vring->tx_timeout = jiffies +
+			msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
+		return;
+	} else if (time_before(jiffies, vring->tx_timeout)) {
+		/* Return if not timeout yet. */
+		return;
+	}
+
+	/*
+	 * Drop the packet after timeout. The outstanding packet is
+	 * released and the remaining bytes will be sent with padding byte 0x00
+	 * as a recovery. On the peer(host) side, the padding bytes 0x00 will be
+	 * either dropped directly, or appended into existing outstanding packet
+	 * thus dropped as corrupted network packet.
+	 */
+	vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
+	mlxbf_tmfifo_release_pkt(vring);
+	vring->cur_len = 0;
+	vring->rem_len = 0;
+	vring->fifo->vring[0] = NULL;
+
+	/*
+	 * Make sure the load/store are in order before
+	 * returning back to virtio.
+	 */
+	virtio_mb(false);
+
+	/* Notify upper layer. */
+	spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
+	vring_interrupt(0, vring->vq);
+	spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
+}
+
 /* Rx & Tx processing of a queue. */
 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 {
@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 		return;
 
 	do {
+retry:
 		/* Get available FIFO space. */
 		if (avail == 0) {
 			if (is_rx)
@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 				break;
 		}
 
+		/* Insert paddings for discarded Tx packet. */
+		if (!is_rx) {
+			vring->tx_timeout = 0;
+			while (vring->rem_padding >= sizeof(u64)) {
+				writeq(0, vring->fifo->tx.data);
+				vring->rem_padding -= sizeof(u64);
+				if (--avail == 0)
+					goto retry;
+			}
+		}
+
 		/* Console output always comes from the Tx buffer. */
 		if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
 			mlxbf_tmfifo_console_tx(fifo, avail);
@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 		/* Handle one descriptor. */
 		more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
 	} while (more);
+
+	/* Check Tx timeout. */
+	if (avail <= 0 && !is_rx)
+		mlxbf_tmfifo_check_tx_timeout(vring);
 }
 
 /* Handle Rx or Tx queues. */


@@ -10,6 +10,7 @@ config AMD_PMF
 	depends on AMD_NB
 	select ACPI_PLATFORM_PROFILE
 	depends on TEE && AMDTEE
+	depends on AMD_SFH_HID
 	help
 	  This driver provides support for the AMD Platform Management Framework.
 	  The goal is to enhance end user experience by making AMD PCs smarter,


@@ -10,6 +10,7 @@
  */
 
 #include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
 #include <linux/power_supply.h>
 #include <linux/units.h>
 #include "pmf.h"
@@ -44,6 +45,8 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
 	dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
 	dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
 	dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+	dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+	dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
 	dev_dbg(dev->dev, "==== TA inputs END ====\n");
 }
 #else
@@ -147,6 +150,38 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
 	return 0;
 }
 
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+	struct amd_sfh_info sfh_info;
+	int ret;
+
+	/* Get ALS data */
+	ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+	if (!ret)
+		in->ev_info.ambient_light = sfh_info.ambient_light;
+	else
+		return ret;
+
+	/* get HPD data */
+	ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+	if (ret)
+		return ret;
+
+	switch (sfh_info.user_present) {
+	case SFH_NOT_DETECTED:
+		in->ev_info.user_present = 0xff; /* assume no sensors connected */
+		break;
+	case SFH_USER_PRESENT:
+		in->ev_info.user_present = 1;
+		break;
+	case SFH_USER_AWAY:
+		in->ev_info.user_present = 0;
+		break;
+	}
+
+	return 0;
+}
+
 void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
 {
 	/* TA side lid open is 1 and close is 0, hence the ! here */
@@ -155,4 +190,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
 	amd_pmf_get_smu_info(dev, in);
 	amd_pmf_get_battery_info(dev, in);
 	amd_pmf_get_slider_info(dev, in);
+	amd_pmf_get_sensor_info(dev, in);
 }


@@ -298,8 +298,10 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
 	if (!new_policy_buf)
 		return -ENOMEM;
 
-	if (copy_from_user(new_policy_buf, buf, length))
+	if (copy_from_user(new_policy_buf, buf, length)) {
+		kfree(new_policy_buf);
 		return -EFAULT;
+	}
 
 	kfree(dev->policy_buf);
 	dev->policy_buf = new_policy_buf;


@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
 	if (fw->size != expected_size) {
 		dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
 			expected_size, fw->size);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto release;
 	}
 
 	ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);


@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
 static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
 static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
 
-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-	struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
+	struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
 
 	return sprintf(buf, "%u\n", data->domain_id);
 }
 
-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-	struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
+	struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
 
 	return sprintf(buf, "%u\n", data->cluster_id);
 }
 
-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-	struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
+	struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
 
 	return sprintf(buf, "%u\n", data->package_id);
 }
@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
 }
 
 #define store_uncore_min_max(name, min_max) \
-static ssize_t store_##name(struct device *dev, \
-			    struct device_attribute *attr, \
+static ssize_t store_##name(struct kobject *kobj, \
+			    struct kobj_attribute *attr, \
 			    const char *buf, size_t count) \
 { \
-	struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+	struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
 	\
 	return store_min_max_freq_khz(data, buf, count, \
 				      min_max); \
 }
 
 #define show_uncore_min_max(name, min_max) \
-static ssize_t show_##name(struct device *dev, \
-			   struct device_attribute *attr, char *buf)\
+static ssize_t show_##name(struct kobject *kobj, \
+			   struct kobj_attribute *attr, char *buf)\
 { \
-	struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+	struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
 	\
 	return show_min_max_freq_khz(data, buf, min_max); \
 }
 
 #define show_uncore_perf_status(name) \
-static ssize_t show_##name(struct device *dev, \
-			   struct device_attribute *attr, char *buf)\
+static ssize_t show_##name(struct kobject *kobj, \
+			   struct kobj_attribute *attr, char *buf)\
 { \
-	struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+	struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
 	\
 	return show_perf_status_freq_khz(data, buf); \
 }
@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
 show_uncore_perf_status(current_freq_khz);
 
 #define show_uncore_data(member_name) \
-static ssize_t show_##member_name(struct device *dev, \
-				  struct device_attribute *attr, char *buf)\
+static ssize_t show_##member_name(struct kobject *kobj, \
+				  struct kobj_attribute *attr, char *buf)\
 { \
 	struct uncore_data *data = container_of(attr, struct uncore_data,\
-						member_name##_dev_attr);\
+						member_name##_kobj_attr);\
 	\
 	return sysfs_emit(buf, "%u\n", \
 			  data->member_name); \
@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
 
 #define init_attribute_rw(_name) \
 do { \
-	sysfs_attr_init(&data->_name##_dev_attr.attr); \
-	data->_name##_dev_attr.show = show_##_name; \
-	data->_name##_dev_attr.store = store_##_name; \
-	data->_name##_dev_attr.attr.name = #_name; \
-	data->_name##_dev_attr.attr.mode = 0644; \
+	sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+	data->_name##_kobj_attr.show = show_##_name; \
+	data->_name##_kobj_attr.store = store_##_name; \
+	data->_name##_kobj_attr.attr.name = #_name; \
+	data->_name##_kobj_attr.attr.mode = 0644; \
 } while (0)
 
 #define init_attribute_ro(_name) \
 do { \
-	sysfs_attr_init(&data->_name##_dev_attr.attr); \
-	data->_name##_dev_attr.show = show_##_name; \
-	data->_name##_dev_attr.store = NULL; \
-	data->_name##_dev_attr.attr.name = #_name; \
-	data->_name##_dev_attr.attr.mode = 0444; \
+	sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+	data->_name##_kobj_attr.show = show_##_name; \
+	data->_name##_kobj_attr.store = NULL; \
+	data->_name##_kobj_attr.attr.name = #_name; \
+	data->_name##_kobj_attr.attr.mode = 0444; \
 } while (0)
 
 #define init_attribute_root_ro(_name) \
 do { \
-	sysfs_attr_init(&data->_name##_dev_attr.attr); \
-	data->_name##_dev_attr.show = show_##_name; \
-	data->_name##_dev_attr.store = NULL; \
-	data->_name##_dev_attr.attr.name = #_name; \
-	data->_name##_dev_attr.attr.mode = 0400; \
+	sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+	data->_name##_kobj_attr.show = show_##_name; \
+	data->_name##_kobj_attr.store = NULL; \
+	data->_name##_kobj_attr.attr.name = #_name; \
+	data->_name##_kobj_attr.attr.mode = 0400; \
 } while (0)
 
 static int create_attr_group(struct uncore_data *data, char *name)
@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
 	if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
 		init_attribute_root_ro(domain_id);
-		data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
+		data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
 		init_attribute_root_ro(fabric_cluster_id);
-		data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
+		data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
 		init_attribute_root_ro(package_id);
-		data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
+		data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
 	}
 
-	data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
-	data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
-	data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
-	data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+	data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+	data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+	data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+	data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
 
 	ret = uncore_read_freq(data, &freq);
 	if (!ret)
-		data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+		data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
 
 	data->uncore_attrs[index] = NULL;


@@ -26,14 +26,14 @@
  * @instance_id: Unique instance id to append to directory name
  * @name: Sysfs entry name for this instance
  * @uncore_attr_group: Attribute group storage
- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
- * @domain_id_dev_attr: Storage for device attribute domain_id
- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
- * @package_id_dev_attr: Storage for device attribute package_id
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
  * @uncore_attrs: Attribute storage for group creation
  *
  * This structure is used to encapsulate all data related to uncore sysfs
@@ -53,14 +53,14 @@ struct uncore_data {
 	char name[32];
 
 	struct attribute_group uncore_attr_group;
-	struct device_attribute max_freq_khz_dev_attr;
-	struct device_attribute min_freq_khz_dev_attr;
-	struct device_attribute initial_max_freq_khz_dev_attr;
-	struct device_attribute initial_min_freq_khz_dev_attr;
-	struct device_attribute current_freq_khz_dev_attr;
-	struct device_attribute domain_id_dev_attr;
-	struct device_attribute fabric_cluster_id_dev_attr;
-	struct device_attribute package_id_dev_attr;
+	struct kobj_attribute max_freq_khz_kobj_attr;
+	struct kobj_attribute min_freq_khz_kobj_attr;
+	struct kobj_attribute initial_max_freq_khz_kobj_attr;
+	struct kobj_attribute initial_min_freq_khz_kobj_attr;
+	struct kobj_attribute current_freq_khz_kobj_attr;
+	struct kobj_attribute domain_id_kobj_attr;
+	struct kobj_attribute fabric_cluster_id_kobj_attr;
+	struct kobj_attribute package_id_kobj_attr;
 	struct attribute *uncore_attrs[9];
 };
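
The callback-type change above is needed because these attributes are registered on a kobject rather than on a struct device, so the handlers must take a struct kobj_attribute and recover their per-instance data with container_of(). A minimal illustrative sketch of that pattern follows; it is not part of the diff, and struct example_data and the names in it are hypothetical:

    #include <linux/kobject.h>
    #include <linux/sysfs.h>

    struct example_data {
            unsigned int value;
            struct kobj_attribute value_kobj_attr;  /* embedded so container_of() can find it */
    };

    static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
    {
            /* Recover the per-instance data from the embedded attribute. */
            struct example_data *data = container_of(attr, struct example_data, value_kobj_attr);

            return sysfs_emit(buf, "%u\n", data->value);
    }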


@@ -32,7 +32,7 @@ static int get_fwu_request(struct device *dev, u32 *out)
 		return -ENODEV;
 
 	if (obj->type != ACPI_TYPE_INTEGER) {
-		dev_warn(dev, "wmi_query_block returned invalid value\n");
+		dev_warn(dev, "wmidev_block_query returned invalid value\n");
 		kfree(obj);
 		return -EINVAL;
 	}
@@ -55,7 +55,7 @@ static int set_fwu_request(struct device *dev, u32 in)
 
 	status = wmidev_block_set(to_wmi_device(dev), 0, &input);
 	if (ACPI_FAILURE(status)) {
-		dev_err(dev, "wmi_set_block failed\n");
+		dev_err(dev, "wmidev_block_set failed\n");
 		return -ENODEV;
 	}


@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
 	{}
 };
 
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that PCI specification
+ * defines. Same definitions exist tree-wide. Unify this definition and
+ * the other definitions then move to include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+	u32 bus_dev_id;
+	struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
 static int p2sb_get_devfn(unsigned int *devfn)
 {
 	unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -39,10 +54,18 @@ static int p2sb_get_devfn(unsigned int *devfn)
 	return 0;
 }
 
-/* Copy resource from the first BAR of the device in question */
-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static bool p2sb_valid_resource(struct resource *res)
 {
-	struct resource *bar0 = &pdev->resource[0];
+	if (res->flags)
+		return true;
+
+	return false;
+}
+
+/* Copy resource from the first BAR of the device in question */
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+{
+	struct resource *bar0 = pci_resource_n(pdev, 0);
 
 	/* Make sure we have no dangling pointers in the output */
 	memset(mem, 0, sizeof(*mem));
@@ -56,22 +79,108 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
 	mem->end = bar0->end;
 	mem->flags = bar0->flags;
 	mem->desc = bar0->desc;
+}
+
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
+{
+	struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
+	struct pci_dev *pdev;
+
+	pdev = pci_scan_single_device(bus, devfn);
+	if (!pdev)
+		return;
+
+	p2sb_read_bar0(pdev, &cache->res);
+	cache->bus_dev_id = bus->dev.id;
+
+	pci_stop_and_remove_bus_device(pdev);
+}
+
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+	unsigned int slot, fn;
+
+	if (PCI_FUNC(devfn) == 0) {
+		/*
+		 * When function number of the P2SB device is zero, scan it and
+		 * other function numbers, and if devices are available, cache
+		 * their BAR0s.
+		 */
+		slot = PCI_SLOT(devfn);
+		for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+			p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+	} else {
+		/* Scan the P2SB device and cache its BAR0 */
+		p2sb_scan_and_cache_devfn(bus, devfn);
+	}
+
+	if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+		return -ENOENT;
 
 	return 0;
 }
 
-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
 {
-	struct pci_dev *pdev;
+	static struct pci_bus *p2sb_bus;
+
+	bus = bus ?: p2sb_bus;
+	if (bus)
+		return bus;
+
+	/* Assume P2SB is on the bus 0 in domain 0 */
+	p2sb_bus = pci_find_bus(0, 0);
+	return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
+{
+	unsigned int devfn_p2sb;
+	u32 value = P2SBC_HIDE;
+	struct pci_bus *bus;
+	u16 class;
 	int ret;
 
-	pdev = pci_scan_single_device(bus, devfn);
-	if (!pdev)
+	/* Get devfn for P2SB device itself */
+	ret = p2sb_get_devfn(&devfn_p2sb);
+	if (ret)
+		return ret;
+
+	bus = p2sb_get_bus(NULL);
+	if (!bus)
 		return -ENODEV;
 
-	ret = p2sb_read_bar0(pdev, mem);
+	/*
+	 * When a device with same devfn exists and its device class is not
+	 * PCI_CLASS_MEMORY_OTHER for P2SB, do not touch it.
+	 */
+	pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+	if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+		return -ENODEV;
+
+	/*
+	 * Prevent concurrent PCI bus scan from seeing the P2SB device and
+	 * removing via sysfs while it is temporarily exposed.
+	 */
+	pci_lock_rescan_remove();
+
+	/*
+	 * The BIOS prevents the P2SB device from being enumerated by the PCI
+	 * subsystem, so we need to unhide and hide it back to lookup the BAR.
+	 * Unhide the P2SB device here, if needed.
+	 */
+	pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
+	if (value & P2SBC_HIDE)
+		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
+
+	ret = p2sb_scan_and_cache(bus, devfn_p2sb);
+
+	/* Hide the P2SB device, if it was hidden */
+	if (value & P2SBC_HIDE)
+		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
+
+	pci_unlock_rescan_remove();
 
-	pci_stop_and_remove_bus_device(pdev);
 	return ret;
 }
@@ -81,64 +190,53 @@ static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
  * @devfn: PCI slot and function to communicate with
  * @mem: memory resource to be filled in
  *
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- *
- * if @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
  * If @devfn is 0, it will be replaced by devfn of the P2SB device.
 *
 * Caller must provide a valid pointer to @mem.
 *
- * Locking is handled by pci_rescan_remove_lock mutex.
- *
 * Return:
 * 0 on success or appropriate errno value on error.
 */
 int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
 {
-	struct pci_dev *pdev_p2sb;
-	unsigned int devfn_p2sb;
-	u32 value = P2SBC_HIDE;
+	struct p2sb_res_cache *cache;
 	int ret;
 
-	/* Get devfn for P2SB device itself */
-	ret = p2sb_get_devfn(&devfn_p2sb);
-	if (ret)
-		return ret;
-
-	/* if @bus is NULL, use bus 0 in domain 0 */
-	bus = bus ?: pci_find_bus(0, 0);
-
-	/*
-	 * Prevent concurrent PCI bus scan from seeing the P2SB device and
-	 * removing via sysfs while it is temporarily exposed.
-	 */
-	pci_lock_rescan_remove();
-
-	/* Unhide the P2SB device, if needed */
-	pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
-	if (value & P2SBC_HIDE)
-		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
-
-	pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
-	if (devfn)
-		ret = p2sb_scan_and_read(bus, devfn, mem);
-	else
-		ret = p2sb_read_bar0(pdev_p2sb, mem);
-	pci_stop_and_remove_bus_device(pdev_p2sb);
-
-	/* Hide the P2SB device, if it was hidden */
-	if (value & P2SBC_HIDE)
-		pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE);
-
-	pci_unlock_rescan_remove();
-
-	if (ret)
-		return ret;
-
-	if (mem->flags == 0)
+	bus = p2sb_get_bus(bus);
+	if (!bus)
 		return -ENODEV;
 
+	if (!devfn) {
+		ret = p2sb_get_devfn(&devfn);
+		if (ret)
+			return ret;
+	}
+
+	cache = &p2sb_resources[PCI_FUNC(devfn)];
+	if (cache->bus_dev_id != bus->dev.id)
+		return -ENODEV;
+
+	if (!p2sb_valid_resource(&cache->res))
+		return -ENOENT;
+
+	memcpy(mem, &cache->res, sizeof(*mem));
 	return 0;
 }
 EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+	p2sb_cache_resources();
+	return 0;
+}
+
+/*
+ * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
+ * not be locked in sysfs pci bus rescan path because of deadlock. To
+ * avoid the deadlock, access to P2SB devices with the lock at an early
+ * step in kernel initialization and cache required resources. This
+ * should happen after subsys_initcall which initializes PCI subsystem
+ * and before device_initcall which requires P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
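
With the P2SB BAR0 values cached once at fs_initcall time, p2sb_bar() no longer needs to take pci_rescan_remove_lock itself and can be called from a PCI device probe path. A minimal sketch of a hypothetical caller, only to illustrate the API (the driver and names below are made up, not from this series):

    #include <linux/p2sb.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource mem;
            int ret;

            /* NULL bus means bus 0 in domain 0; devfn 0 means the default P2SB devfn. */
            ret = p2sb_bar(NULL, 0, &mem);
            if (ret)
                    return ret;

            dev_info(&pdev->dev, "P2SB BAR0: %pR\n", &mem);
            return 0;
    }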


@@ -944,6 +944,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
 	.properties = teclast_tbook11_props,
 };
 
+static const struct property_entry teclast_x16_plus_props[] = {
+	PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+	PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+	PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+	PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+	PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+	PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+	PROPERTY_ENTRY_BOOL("silead,home-button"),
+	{ }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+	.embedded_fw = {
+		.name = "silead/gsl3692-teclast-x16-plus.fw",
+		.prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+		.length = 43560,
+		.sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+			    0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+			    0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+			    0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+	},
+	.acpi_name = "MSSL1680:00",
+	.properties = teclast_x16_plus_props,
+};
+
 static const struct property_entry teclast_x3_plus_props[] = {
 	PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
 	PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1612,6 +1638,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
 		},
 	},
+	{
+		/* Teclast X16 Plus */
+		.driver_data = (void *)&teclast_x16_plus_data,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+			DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+		},
+	},
 	{
 		/* Teclast X3 Plus */
 		.driver_data = (void *)&teclast_x3_plus_data,


@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
@@ -56,7 +57,6 @@ static_assert(__alignof__(struct guid_block) == 1);
 
 enum { /* wmi_block flags */
 	WMI_READ_TAKES_NO_ARGS,
-	WMI_PROBED,
 };
 
 struct wmi_block {
@@ -64,8 +64,10 @@ struct wmi_block {
 	struct list_head list;
 	struct guid_block gblock;
 	struct acpi_device *acpi_device;
+	struct rw_semaphore notify_lock; /* Protects notify callback add/remove */
 	wmi_notify_handler handler;
 	void *handler_data;
+	bool driver_ready;
 	unsigned long flags;
 };
 
@@ -199,6 +201,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
 	return 0;
 }
 
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+	struct wmi_block *wblock = dev_to_wblock(dev);
+	const u32 *notify_id = data;
+
+	if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+		return 1;
+
+	return 0;
+}
+
 static const struct bus_type wmi_bus_type;
 
 static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -218,6 +231,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
 	return dev_to_wdev(dev);
 }
 
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	return to_wmi_device(dev);
+}
+
 static void wmi_device_put(struct wmi_device *wdev)
 {
 	put_device(&wdev->dev);
@@ -552,32 +576,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
 				       wmi_notify_handler handler,
 				       void *data)
 {
-	struct wmi_block *block;
-	acpi_status status = AE_NOT_EXIST;
-	guid_t guid_input;
-
-	if (!guid || !handler)
-		return AE_BAD_PARAMETER;
-
-	if (guid_parse(guid, &guid_input))
-		return AE_BAD_PARAMETER;
-
-	list_for_each_entry(block, &wmi_block_list, list) {
-		acpi_status wmi_status;
-
-		if (guid_equal(&block->gblock.guid, &guid_input)) {
-			if (block->handler)
-				return AE_ALREADY_ACQUIRED;
-
-			block->handler = handler;
-			block->handler_data = data;
-
-			wmi_status = wmi_method_enable(block, true);
-			if ((wmi_status != AE_OK) ||
-			    ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
-				status = wmi_status;
-		}
+	struct wmi_block *wblock;
+	struct wmi_device *wdev;
+	acpi_status status;
+
+	wdev = wmi_find_device_by_guid(guid);
+	if (IS_ERR(wdev))
+		return AE_ERROR;
+
+	wblock = container_of(wdev, struct wmi_block, dev);
+
+	down_write(&wblock->notify_lock);
+	if (wblock->handler) {
+		status = AE_ALREADY_ACQUIRED;
+	} else {
+		wblock->handler = handler;
+		wblock->handler_data = data;
+
+		if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+			dev_warn(&wblock->dev.dev, "Failed to enable device\n");
+
+		status = AE_OK;
 	}
+	up_write(&wblock->notify_lock);
+
+	wmi_device_put(wdev);
 
 	return status;
 }
@@ -593,30 +616,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
 */
 acpi_status wmi_remove_notify_handler(const char *guid)
 {
-	struct wmi_block *block;
-	acpi_status status = AE_NOT_EXIST;
-	guid_t guid_input;
-
-	if (!guid)
-		return AE_BAD_PARAMETER;
-
-	if (guid_parse(guid, &guid_input))
-		return AE_BAD_PARAMETER;
-
-	list_for_each_entry(block, &wmi_block_list, list) {
-		acpi_status wmi_status;
-
-		if (guid_equal(&block->gblock.guid, &guid_input)) {
-			if (!block->handler)
-				return AE_NULL_ENTRY;
-
-			wmi_status = wmi_method_enable(block, false);
-			block->handler = NULL;
-			block->handler_data = NULL;
-
-			if (wmi_status != AE_OK || (wmi_status == AE_OK && status == AE_NOT_EXIST))
-				status = wmi_status;
-		}
+	struct wmi_block *wblock;
+	struct wmi_device *wdev;
+	acpi_status status;
+
+	wdev = wmi_find_device_by_guid(guid);
+	if (IS_ERR(wdev))
+		return AE_ERROR;
+
+	wblock = container_of(wdev, struct wmi_block, dev);
+
+	down_write(&wblock->notify_lock);
+	if (!wblock->handler) {
+		status = AE_NULL_ENTRY;
+	} else {
+		if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+			dev_warn(&wblock->dev.dev, "Failed to disable device\n");
+
+		wblock->handler = NULL;
+		wblock->handler_data = NULL;
+
+		status = AE_OK;
 	}
+	up_write(&wblock->notify_lock);
+
+	wmi_device_put(wdev);
 
 	return status;
 }
@@ -635,15 +659,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
 acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
 {
 	struct wmi_block *wblock;
+	struct wmi_device *wdev;
+	acpi_status status;
 
-	list_for_each_entry(wblock, &wmi_block_list, list) {
-		struct guid_block *gblock = &wblock->gblock;
+	wdev = wmi_find_event_by_notify_id(event);
+	if (IS_ERR(wdev))
+		return AE_NOT_FOUND;
 
-		if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
-			return get_event_data(wblock, out);
-	}
+	wblock = container_of(wdev, struct wmi_block, dev);
+	status = get_event_data(wblock, out);
 
-	return AE_NOT_FOUND;
+	wmi_device_put(wdev);
+
+	return status;
 }
 EXPORT_SYMBOL_GPL(wmi_get_event_data);
@@ -848,7 +876,7 @@ static int wmi_dev_probe(struct device *dev)
 	if (wdriver->probe) {
 		ret = wdriver->probe(dev_to_wdev(dev),
 				     find_guid_context(wblock, wdriver));
-		if (!ret) {
+		if (ret) {
 			if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
 				dev_warn(dev, "Failed to disable device\n");
@@ -856,7 +884,9 @@ static int wmi_dev_probe(struct device *dev)
 		}
 	}
 
-	set_bit(WMI_PROBED, &wblock->flags);
+	down_write(&wblock->notify_lock);
+	wblock->driver_ready = true;
+	up_write(&wblock->notify_lock);
 
 	return 0;
 }
@@ -866,7 +896,9 @@ static void wmi_dev_remove(struct device *dev)
 	struct wmi_block *wblock = dev_to_wblock(dev);
 	struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
 
-	clear_bit(WMI_PROBED, &wblock->flags);
+	down_write(&wblock->notify_lock);
+	wblock->driver_ready = false;
+	up_write(&wblock->notify_lock);
 
 	if (wdriver->remove)
 		wdriver->remove(dev_to_wdev(dev));
@@ -993,6 +1025,8 @@ static int wmi_create_device(struct device *wmi_bus_dev,
 		wblock->dev.setable = true;
 
 out_init:
+	init_rwsem(&wblock->notify_lock);
+	wblock->driver_ready = false;
 	wblock->dev.dev.bus = &wmi_bus_type;
 	wblock->dev.dev.parent = wmi_bus_dev;
@@ -1163,6 +1197,26 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
 	}
 }
 
+static void wmi_notify_driver(struct wmi_block *wblock)
+{
+	struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+	struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+	acpi_status status;
+
+	if (!driver->no_notify_data) {
+		status = get_event_data(wblock, &data);
+		if (ACPI_FAILURE(status)) {
+			dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+			return;
+		}
+	}
+
+	if (driver->notify)
+		driver->notify(&wblock->dev, data.pointer);
+
+	kfree(data.pointer);
+}
+
 static int wmi_notify_device(struct device *dev, void *data)
 {
 	struct wmi_block *wblock = dev_to_wblock(dev);
@@ -1171,28 +1225,17 @@ static int wmi_notify_device(struct device *dev, void *data)
 	if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
 		return 0;
 
-	/* If a driver is bound, then notify the driver. */
-	if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
-		struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
-		struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
-		acpi_status status;
-
-		if (!driver->no_notify_data) {
-			status = get_event_data(wblock, &evdata);
-			if (ACPI_FAILURE(status)) {
-				dev_warn(&wblock->dev.dev, "failed to get event data\n");
-				return -EIO;
-			}
-		}
-
-		if (driver->notify)
-			driver->notify(&wblock->dev, evdata.pointer);
-
-		kfree(evdata.pointer);
-	} else if (wblock->handler) {
-		/* Legacy handler */
-		wblock->handler(*event, wblock->handler_data);
+	down_read(&wblock->notify_lock);
+	/* The WMI driver notify handler conflicts with the legacy WMI handler.
+	 * Because of this the WMI driver notify handler takes precedence.
+	 */
+	if (wblock->dev.dev.driver && wblock->driver_ready) {
+		wmi_notify_driver(wblock);
+	} else {
+		if (wblock->handler)
+			wblock->handler(*event, wblock->handler_data);
 	}
+	up_read(&wblock->notify_lock);
 
 	acpi_bus_generate_netlink_event("wmi", acpi_dev_name(wblock->acpi_device), *event, 0);
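
For reference, the legacy notify interface whose locking is reworked above is used by callers roughly as sketched below; the GUID and handler names are made-up placeholders, not taken from this diff:

    #include <linux/acpi.h>

    /* Hypothetical event GUID, for illustration only. */
    #define EXAMPLE_WMI_EVENT_GUID "12345678-1234-1234-1234-123456789012"

    static void example_wmi_handler(u32 value, void *context)
    {
            pr_info("WMI event 0x%x\n", value);
    }

    /* Typical registration from a driver init path:
     *      wmi_install_notify_handler(EXAMPLE_WMI_EVENT_GUID, example_wmi_handler, NULL);
     * and the matching teardown:
     *      wmi_remove_notify_handler(EXAMPLE_WMI_EVENT_GUID);
     */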