Merge branches 'acpi-osl', 'acpi-bus' and 'acpi-tables'
Merge low-level ACPICA interface changes, an _SB-scope _OSC handshake
update and a data-only ACPI tables parsing code update for 6.8-rc1:

 - Switch over ACPI to using a threaded interrupt handler for the SCI
   (Rafael J. Wysocki).

 - Allow ACPI Notify () handlers to run on all CPUs and clean up the
   ACPI interface for deferred events processing (Rafael J. Wysocki).

 - Switch over the ACPI EC driver to using a threaded handler for the
   dedicated IRQ on systems without the EC GPE (Rafael J. Wysocki).

 - Adjust code using ACPICA spinlocks and the ACPI EC driver spinlock
   to keep local interrupts on (Rafael J. Wysocki).

 - Adjust the USB4 _OSC handshake to correctly handle cases in which
   certain types of OS control are denied by the platform (Mika
   Westerberg).

 - Correct and clean up the generic function for parsing ACPI
   data-only tables with array structure (Yuntao Wang).

* acpi-osl:
  ACPI: EC: Use a spin lock without disabling interrupts
  ACPI: EC: Use a threaded handler for dedicated IRQ
  ACPI: OSL: Use spin locks without disabling interrupts
  ACPI: OSL: Allow Notify () handlers to run on all CPUs
  ACPI: OSL: Rearrange workqueue selection in acpi_os_execute()
  ACPI: OSL: Rework error handling in acpi_os_execute()
  ACPI: OSL: Use a threaded interrupt handler for SCI

* acpi-bus:
  ACPI: Run USB4 _OSC() first with query bit set

* acpi-tables:
  ACPI: tables: Correct and clean up the logic of acpi_parse_entries_array()
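The common thread in the acpi-osl and acpi-ec changes is moving interrupt handling out of hard-IRQ context. A minimal sketch of the request_threaded_irq() pattern they adopt, assuming made-up demo_irq/demo_handler names (not code from this merge):

#include <linux/interrupt.h>
#include <linux/module.h>

static int demo_irq = 9;	/* hypothetical line; the real SCI number comes from ACPI */
module_param(demo_irq, int, 0444);

/* Runs in a kernel thread, so it may sleep and can use plain spin locks. */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
	pr_info("IRQ %d handled in process context\n", irq);
	return IRQ_HANDLED;
}

static int __init demo_init(void)
{
	/*
	 * A NULL primary handler plus IRQF_ONESHOT keeps the line masked
	 * until the thread finishes - the same combination the SCI and
	 * EC conversions below rely on.
	 */
	return request_threaded_irq(demo_irq, NULL, demo_handler,
				    IRQF_SHARED | IRQF_ONESHOT, "demo", &demo_irq);
}

static void __exit demo_exit(void)
{
	free_irq(demo_irq, &demo_irq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");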
commit 8be056a2c0
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -408,7 +408,7 @@ static void acpi_bus_decode_usb_osc(const char *msg, u32 bits)
 static u8 sb_usb_uuid_str[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";
 static void acpi_bus_osc_negotiate_usb_control(void)
 {
-	u32 capbuf[3];
+	u32 capbuf[3], *capbuf_ret;
 	struct acpi_osc_context context = {
 		.uuid_str = sb_usb_uuid_str,
 		.rev = 1,
@@ -428,7 +428,12 @@ static void acpi_bus_osc_negotiate_usb_control(void)
 	control = OSC_USB_USB3_TUNNELING | OSC_USB_DP_TUNNELING |
 		  OSC_USB_PCIE_TUNNELING | OSC_USB_XDOMAIN;
 
-	capbuf[OSC_QUERY_DWORD] = 0;
+	/*
+	 * Run _OSC first with query bit set, trying to get control over
+	 * all tunneling. The platform can then clear out bits in the
+	 * control dword that it does not want to grant to the OS.
+	 */
+	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
 	capbuf[OSC_SUPPORT_DWORD] = 0;
 	capbuf[OSC_CONTROL_DWORD] = control;
 
@@ -441,8 +446,29 @@ static void acpi_bus_osc_negotiate_usb_control(void)
 		goto out_free;
 	}
 
+	/*
+	 * Run _OSC again now with query bit clear and the control dword
+	 * matching what the platform granted (which may not have all
+	 * the control bits set).
+	 */
+	capbuf_ret = context.ret.pointer;
+
+	capbuf[OSC_QUERY_DWORD] = 0;
+	capbuf[OSC_CONTROL_DWORD] = capbuf_ret[OSC_CONTROL_DWORD];
+
+	kfree(context.ret.pointer);
+
+	status = acpi_run_osc(handle, &context);
+	if (ACPI_FAILURE(status))
+		return;
+
+	if (context.ret.length != sizeof(capbuf)) {
+		pr_info("USB4 _OSC: returned invalid length buffer\n");
+		goto out_free;
+	}
+
 	osc_sb_native_usb4_control =
-		control &  acpi_osc_ctx_get_pci_control(&context);
+		control & acpi_osc_ctx_get_pci_control(&context);
 
 	acpi_bus_decode_usb_osc("USB4 _OSC: OS supports", control);
 	acpi_bus_decode_usb_osc("USB4 _OSC: OS controls",
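The bus.c hunks above implement a two-pass _OSC handshake: query first, then commit only what the platform granted. A condensed sketch of the same flow, with error handling trimmed and negotiate()/my_uuid as illustrative stand-ins (the OSC_* constants, struct acpi_osc_context and acpi_run_osc() are the kernel's existing API):

#include <linux/acpi.h>
#include <linux/slab.h>

static u8 my_uuid[] = "23A0D13A-26AB-486C-9C5F-0FFA525A575A";

static u32 negotiate(acpi_handle handle, u32 wanted)
{
	u32 capbuf[3], *capbuf_ret, granted;
	struct acpi_osc_context ctx = {
		.uuid_str = my_uuid,
		.rev = 1,
		.cap.length = sizeof(capbuf),
		.cap.pointer = capbuf,
	};

	/* Pass 1: query only - ask for everything, let firmware mask bits off. */
	capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_DWORD] = 0;
	capbuf[OSC_CONTROL_DWORD] = wanted;
	if (ACPI_FAILURE(acpi_run_osc(handle, &ctx)))
		return 0;

	capbuf_ret = ctx.ret.pointer;
	granted = capbuf_ret[OSC_CONTROL_DWORD];
	kfree(ctx.ret.pointer);

	/* Pass 2: commit - request exactly what the platform granted. */
	capbuf[OSC_QUERY_DWORD] = 0;
	capbuf[OSC_CONTROL_DWORD] = granted;
	if (ACPI_FAILURE(acpi_run_osc(handle, &ctx)))
		return 0;

	kfree(ctx.ret.pointer);
	return granted;
}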
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -525,12 +525,10 @@ static void acpi_ec_clear(struct acpi_ec *ec)
 
 static void acpi_ec_enable_event(struct acpi_ec *ec)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	if (acpi_ec_started(ec))
 		__acpi_ec_enable_event(ec);
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 
 	/* Drain additional events if hardware requires that */
 	if (EC_FLAGS_CLEAR_ON_RESUME)
@@ -546,11 +544,9 @@ static void __acpi_ec_flush_work(void)
 
 static void acpi_ec_disable_event(struct acpi_ec *ec)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	__acpi_ec_disable_event(ec);
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 
 	/*
 	 * When ec_freeze_events is true, we need to flush events in
@@ -571,10 +567,9 @@ void acpi_ec_flush_work(void)
 
 static bool acpi_ec_guard_event(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	bool guarded;
 
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	/*
 	 * If firmware SCI_EVT clearing timing is "event", we actually
 	 * don't know when the SCI_EVT will be cleared by firmware after
@@ -590,31 +585,29 @@ static bool acpi_ec_guard_event(struct acpi_ec *ec)
 	guarded = ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
 		ec->event_state != EC_EVENT_READY &&
 		(!ec->curr || ec->curr->command != ACPI_EC_COMMAND_QUERY);
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 	return guarded;
 }
 
 static int ec_transaction_polled(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
 		ret = 1;
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 	return ret;
 }
 
 static int ec_transaction_completed(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
 		ret = 1;
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 	return ret;
 }
 
@@ -756,7 +749,6 @@ static int ec_guard(struct acpi_ec *ec)
 
 static int ec_poll(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	int repeat = 5; /* number of command restarts */
 
 	while (repeat--) {
@@ -765,14 +757,14 @@ static int ec_poll(struct acpi_ec *ec)
 		do {
 			if (!ec_guard(ec))
 				return 0;
-			spin_lock_irqsave(&ec->lock, flags);
+			spin_lock(&ec->lock);
 			advance_transaction(ec, false);
-			spin_unlock_irqrestore(&ec->lock, flags);
+			spin_unlock(&ec->lock);
 		} while (time_before(jiffies, delay));
 		pr_debug("controller reset, restart transaction\n");
-		spin_lock_irqsave(&ec->lock, flags);
+		spin_lock(&ec->lock);
 		start_transaction(ec);
-		spin_unlock_irqrestore(&ec->lock, flags);
+		spin_unlock(&ec->lock);
 	}
 	return -ETIME;
 }
@@ -780,11 +772,10 @@ static int ec_poll(struct acpi_ec *ec)
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 					struct transaction *t)
 {
-	unsigned long tmp;
 	int ret = 0;
 
 	/* start transaction */
-	spin_lock_irqsave(&ec->lock, tmp);
+	spin_lock(&ec->lock);
 	/* Enable GPE for command processing (IBF=0/OBF=1) */
 	if (!acpi_ec_submit_flushable_request(ec)) {
 		ret = -EINVAL;
@@ -795,11 +786,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 	ec->curr = t;
 	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
 	start_transaction(ec);
-	spin_unlock_irqrestore(&ec->lock, tmp);
+	spin_unlock(&ec->lock);
 
 	ret = ec_poll(ec);
 
-	spin_lock_irqsave(&ec->lock, tmp);
+	spin_lock(&ec->lock);
 	if (t->irq_count == ec_storm_threshold)
 		acpi_ec_unmask_events(ec);
 	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
@@ -808,7 +799,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 	acpi_ec_complete_request(ec);
 	ec_dbg_ref(ec, "Decrease command");
 unlock:
-	spin_unlock_irqrestore(&ec->lock, tmp);
+	spin_unlock(&ec->lock);
 	return ret;
 }
 
@@ -936,9 +927,7 @@ EXPORT_SYMBOL(ec_get_handle);
 
 static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
 		ec_dbg_drv("Starting EC");
 		/* Enable GPE for event processing (SCI_EVT=1) */
@@ -948,31 +937,28 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
 		}
 		ec_log_drv("EC started");
 	}
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 }
 
 static bool acpi_ec_stopped(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	bool flushed;
 
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	flushed = acpi_ec_flushed(ec);
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 	return flushed;
 }
 
 static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	if (acpi_ec_started(ec)) {
 		ec_dbg_drv("Stopping EC");
 		set_bit(EC_FLAGS_STOPPED, &ec->flags);
-		spin_unlock_irqrestore(&ec->lock, flags);
+		spin_unlock(&ec->lock);
 		wait_event(ec->wait, acpi_ec_stopped(ec));
-		spin_lock_irqsave(&ec->lock, flags);
+		spin_lock(&ec->lock);
 		/* Disable GPE for event processing (SCI_EVT=1) */
 		if (!suspending) {
 			acpi_ec_complete_request(ec);
@@ -983,29 +969,25 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
 		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
 		ec_log_drv("EC stopped");
 	}
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_enter_noirq(struct acpi_ec *ec)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	ec->busy_polling = true;
 	ec->polling_guard = 0;
 	ec_log_drv("interrupt blocked");
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 }
 
 static void acpi_ec_leave_noirq(struct acpi_ec *ec)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 	ec->busy_polling = ec_busy_polling;
 	ec->polling_guard = ec_polling_guard;
 	ec_log_drv("interrupt unblocked");
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 }
 
 void acpi_ec_block_transactions(void)
@@ -1137,9 +1119,9 @@ static void acpi_ec_event_processor(struct work_struct *work)
 
 	ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
 
-	spin_lock_irq(&ec->lock);
+	spin_lock(&ec->lock);
 	ec->queries_in_progress--;
-	spin_unlock_irq(&ec->lock);
+	spin_unlock(&ec->lock);
 
 	acpi_ec_put_query_handler(handler);
 	kfree(q);
@@ -1202,12 +1184,12 @@ static int acpi_ec_submit_query(struct acpi_ec *ec)
 	 */
 	ec_dbg_evt("Query(0x%02x) scheduled", value);
 
-	spin_lock_irq(&ec->lock);
+	spin_lock(&ec->lock);
 
 	ec->queries_in_progress++;
 	queue_work(ec_query_wq, &q->work);
 
-	spin_unlock_irq(&ec->lock);
+	spin_unlock(&ec->lock);
 
 	return 0;
 
@@ -1223,14 +1205,14 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
 	ec_dbg_evt("Event started");
 
-	spin_lock_irq(&ec->lock);
+	spin_lock(&ec->lock);
 
 	while (ec->events_to_process) {
-		spin_unlock_irq(&ec->lock);
+		spin_unlock(&ec->lock);
 
 		acpi_ec_submit_query(ec);
 
-		spin_lock_irq(&ec->lock);
+		spin_lock(&ec->lock);
 
 		ec->events_to_process--;
 	}
@@ -1247,11 +1229,11 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
 	ec_dbg_evt("Event stopped");
 
-	spin_unlock_irq(&ec->lock);
+	spin_unlock(&ec->lock);
 
 	guard_timeout = !!ec_guard(ec);
 
-	spin_lock_irq(&ec->lock);
+	spin_lock(&ec->lock);
 
 	/* Take care of SCI_EVT unless someone else is doing that. */
 	if (guard_timeout && !ec->curr)
@@ -1264,7 +1246,7 @@ static void acpi_ec_event_handler(struct work_struct *work)
 
 	ec->events_in_progress--;
 
-	spin_unlock_irq(&ec->lock);
+	spin_unlock(&ec->lock);
 }
 
 static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
@@ -1289,13 +1271,11 @@ static void clear_gpe_and_advance_transaction(struct acpi_ec *ec, bool interrupt)
 
 static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
+	spin_lock(&ec->lock);
 
 	clear_gpe_and_advance_transaction(ec, true);
 
-	spin_unlock_irqrestore(&ec->lock, flags);
+	spin_unlock(&ec->lock);
 }
 
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
@@ -1458,8 +1438,8 @@ static bool install_gpe_event_handler(struct acpi_ec *ec)
 
 static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
 {
-	return request_irq(ec->irq, acpi_ec_irq_handler, IRQF_SHARED,
-			   "ACPI EC", ec) >= 0;
+	return request_threaded_irq(ec->irq, NULL, acpi_ec_irq_handler,
+				    IRQF_SHARED | IRQF_ONESHOT, "ACPI EC", ec) >= 0;
}
 
 /**
@@ -2105,7 +2085,7 @@ bool acpi_ec_dispatch_gpe(void)
 	 * Dispatch the EC GPE in-band, but do not report wakeup in any case
 	 * to allow the caller to process events properly after that.
 	 */
-	spin_lock_irq(&first_ec->lock);
+	spin_lock(&first_ec->lock);
 
 	if (acpi_ec_gpe_status_set(first_ec)) {
 		pm_pr_dbg("ACPI EC GPE status set\n");
@@ -2114,7 +2094,7 @@ bool acpi_ec_dispatch_gpe(void)
 		work_in_progress = acpi_ec_work_in_progress(first_ec);
 	}
 
-	spin_unlock_irq(&first_ec->lock);
+	spin_unlock(&first_ec->lock);
 
 	if (!work_in_progress)
 		return false;
@@ -2127,11 +2107,11 @@ bool acpi_ec_dispatch_gpe(void)
 
 		pm_pr_dbg("ACPI EC work flushed\n");
 
-		spin_lock_irq(&first_ec->lock);
+		spin_lock(&first_ec->lock);
 
 		work_in_progress = acpi_ec_work_in_progress(first_ec);
 
-		spin_unlock_irq(&first_ec->lock);
+		spin_unlock(&first_ec->lock);
 	} while (work_in_progress && !pm_wakeup_pending());
 
 	return false;
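Every ec.c hunk above applies one rule: once the EC interrupt arrives via a kernel thread and all other users of ec->lock also run in process context, plain spin_lock()/spin_unlock() is sufficient and local interrupts can stay enabled while the lock is held. A minimal illustration, with made-up demo_* names rather than the driver's own:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned int demo_events;

/*
 * Called from the threaded IRQ handler and from workqueue items alike;
 * no hard-IRQ path ever takes demo_lock, so the irqsave variant (and the
 * flags bookkeeping that goes with it) is unnecessary.
 */
static void demo_note_event(void)
{
	spin_lock(&demo_lock);
	demo_events++;
	spin_unlock(&demo_lock);
}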
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -544,11 +544,7 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
 
 static irqreturn_t acpi_irq(int irq, void *dev_id)
 {
-	u32 handled;
-
-	handled = (*acpi_irq_handler) (acpi_irq_context);
-
-	if (handled) {
+	if ((*acpi_irq_handler)(acpi_irq_context)) {
 		acpi_irq_handled++;
 		return IRQ_HANDLED;
 	} else {
@@ -582,7 +578,8 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
 
 	acpi_irq_handler = handler;
 	acpi_irq_context = context;
-	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+	if (request_threaded_irq(irq, NULL, acpi_irq, IRQF_SHARED | IRQF_ONESHOT,
+				 "acpi", acpi_irq)) {
 		pr_err("SCI (IRQ%d) allocation failed\n", irq);
 		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
@@ -1063,9 +1060,7 @@ int __init acpi_debugger_init(void)
 acpi_status acpi_os_execute(acpi_execute_type type,
 			    acpi_osd_exec_callback function, void *context)
 {
-	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
-	struct workqueue_struct *queue;
 	int ret;
 
 	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
@@ -1076,9 +1071,9 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 		ret = acpi_debugger_create_thread(function, context);
 		if (ret) {
 			pr_err("Kernel thread creation failed\n");
-			status = AE_ERROR;
+			return AE_ERROR;
 		}
-		goto out_thread;
+		return AE_OK;
 	}
 
 	/*
@@ -1096,43 +1091,41 @@ acpi_status acpi_os_execute(acpi_execute_type type,
 
 	dpc->function = function;
 	dpc->context = context;
+	INIT_WORK(&dpc->work, acpi_os_execute_deferred);
 
-	/*
-	 * To prevent lockdep from complaining unnecessarily, make sure that
-	 * there is a different static lockdep key for each workqueue by using
-	 * INIT_WORK() for each of them separately.
-	 */
-	if (type == OSL_NOTIFY_HANDLER) {
-		queue = kacpi_notify_wq;
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	} else if (type == OSL_GPE_HANDLER) {
-		queue = kacpid_wq;
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	} else {
+	switch (type) {
+	case OSL_NOTIFY_HANDLER:
+		ret = queue_work(kacpi_notify_wq, &dpc->work);
+		break;
+	case OSL_GPE_HANDLER:
+		/*
+		 * On some machines, a software-initiated SMI causes corruption
+		 * unless the SMI runs on CPU 0. An SMI can be initiated by
+		 * any AML, but typically it's done in GPE-related methods that
+		 * are run via workqueues, so we can avoid the known corruption
+		 * cases by always queueing on CPU 0.
+		 */
+		ret = queue_work_on(0, kacpid_wq, &dpc->work);
+		break;
+	default:
 		pr_err("Unsupported os_execute type %d.\n", type);
-		status = AE_ERROR;
+		goto err;
 	}
-
-	if (ACPI_FAILURE(status))
-		goto err_workqueue;
-
-	/*
-	 * On some machines, a software-initiated SMI causes corruption unless
-	 * the SMI runs on CPU 0. An SMI can be initiated by any AML, but
-	 * typically it's done in GPE-related methods that are run via
-	 * workqueues, so we can avoid the known corruption cases by always
-	 * queueing on CPU 0.
-	 */
-	ret = queue_work_on(0, queue, &dpc->work);
 	if (!ret) {
 		pr_err("Unable to queue work\n");
-		status = AE_ERROR;
+		goto err;
 	}
-err_workqueue:
-	if (ACPI_FAILURE(status))
-		kfree(dpc);
-out_thread:
-	return status;
+
+	return AE_OK;
+
+err:
+	kfree(dpc);
+	return AE_ERROR;
 }
 EXPORT_SYMBOL(acpi_os_execute);
 
@@ -1522,20 +1515,18 @@ void acpi_os_delete_lock(acpi_spinlock handle)
 acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
 	__acquires(lockp)
 {
-	acpi_cpu_flags flags;
-
-	spin_lock_irqsave(lockp, flags);
-	return flags;
+	spin_lock(lockp);
+	return 0;
 }
 
 /*
  * Release a spinlock. See above.
 */
-void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
+void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags not_used)
 	__releases(lockp)
 {
-	spin_unlock_irqrestore(lockp, flags);
+	spin_unlock(lockp);
 }
 
 #ifndef ACPI_USE_LOCAL_CACHE
@@ -1672,7 +1663,7 @@ acpi_status __init acpi_os_initialize(void)
 acpi_status __init acpi_os_initialize1(void)
 {
 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
-	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
 	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
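The reworked acpi_os_execute() keeps GPE work pinned to CPU 0 to dodge the SMI corruption described in its comment, while Notify work now runs on any CPU (max_active 0 instead of 1 for kacpi_notify_wq). A sketch of the queue_work_on() pinning pattern, with placeholder demo_* names:

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	/* AML execution would happen here; any SMI it triggers stays on CPU 0. */
}

static DECLARE_WORK(demo_work, demo_fn);

static bool demo_queue(struct workqueue_struct *demo_wq)
{
	/* Returns false if the work item was already pending. */
	return queue_work_on(0, demo_wq, &demo_work);
}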
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -85,11 +85,6 @@ acpi_get_subtable_type(char *id)
 	return ACPI_SUBTABLE_COMMON;
 }
 
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
-	return proc->handler || proc->handler_arg;
-}
-
 static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
 					  union acpi_subtable_headers *hdr,
 					  unsigned long end)
@@ -133,7 +128,6 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
 	unsigned long table_end, subtable_len, entry_len;
 	struct acpi_subtable_entry entry;
 	int count = 0;
-	int errs = 0;
 	int i;
 
 	table_end = (unsigned long)table_header + table_header->length;
@@ -145,25 +139,19 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
 		((unsigned long)table_header + table_size);
 	subtable_len = acpi_get_subtable_header_length(&entry);
 
-	while (((unsigned long)entry.hdr) + subtable_len  < table_end) {
-		if (max_entries && count >= max_entries)
-			break;
-
+	while (((unsigned long)entry.hdr) + subtable_len < table_end) {
 		for (i = 0; i < proc_num; i++) {
 			if (acpi_get_entry_type(&entry) != proc[i].id)
 				continue;
-			if (!has_handler(&proc[i]) ||
-			    (!errs &&
-			     call_handler(&proc[i], entry.hdr, table_end))) {
-				errs++;
-				continue;
-			}
+
+			if (!max_entries || count < max_entries)
+				if (call_handler(&proc[i], entry.hdr, table_end))
+					return -EINVAL;
 
 			proc[i].count++;
+			count++;
 			break;
 		}
-		if (i != proc_num)
-			count++;
 
 		/*
 		 * If entry->length is 0, break from this loop to avoid
@@ -180,9 +168,9 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
 	}
 
 	if (max_entries && count > max_entries) {
-		pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
-			id, proc->id, count);
+		pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+			id, proc->id, count - max_entries, count);
 	}
 
-	return errs ? -EINVAL : count;
+	return count;
 }
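The acpi_parse_entries_array() rework changes what callers of the acpi_table_parse_entries()/acpi_table_parse_madt() family see: entries beyond max_entries are still counted but no longer passed to the handler, and a handler failure now makes the whole parse return -EINVAL. A hypothetical caller sketch (count_lapic and demo_madt_scan are made-up names; acpi_table_parse_madt() is the kernel's real API):

#include <linux/acpi.h>
#include <linux/init.h>

static int __init count_lapic(union acpi_subtable_headers *header,
			      const unsigned long end)
{
	/* A non-zero return here now fails the whole parse with -EINVAL. */
	return 0;
}

static int __init demo_madt_scan(void)
{
	/* max_entries == 0 means no limit; the return value is the count. */
	int n = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, count_lapic, 0);

	if (n < 0)
		pr_err("MADT parse failed: %d\n", n);
	else
		pr_info("MADT: %d local APIC entries\n", n);

	return 0;
}
late_initcall(demo_madt_scan);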