diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index d56f75b605d8..976fdf27e790 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -143,8 +143,14 @@ static void gsc_destroy_one(struct drm_i915_private *i915,
 	struct intel_gsc_intf *intf = &gsc->intf[intf_id];

 	if (intf->adev) {
-		auxiliary_device_delete(&intf->adev->aux_dev);
-		auxiliary_device_uninit(&intf->adev->aux_dev);
+		struct auxiliary_device *aux_dev = &intf->adev->aux_dev;
+
+		if (intf_id == 0)
+			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
+							  aux_dev->dev.bus);
+
+		auxiliary_device_delete(aux_dev);
+		auxiliary_device_uninit(aux_dev);
 		intf->adev = NULL;
 	}

@@ -243,14 +249,24 @@ add_device:
 		goto fail;
 	}

+	intf->adev = adev; /* needed by the notifier */
+
+	if (intf_id == 0)
+		intel_huc_register_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
+						aux_dev->dev.bus);
+
 	ret = auxiliary_device_add(aux_dev);
 	if (ret < 0) {
 		drm_err(&i915->drm, "gsc aux add failed %d\n", ret);
+		if (intf_id == 0)
+			intel_huc_unregister_gsc_notifier(&gsc_to_gt(gsc)->uc.huc,
+							  aux_dev->dev.bus);
+		intf->adev = NULL;
+
 		/* adev will be freed with the put_device() and .release sequence */
 		auxiliary_device_uninit(aux_dev);
 		goto fail;
 	}

-	intf->adev = adev;
 	return;

 fail:
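Note the ordering in the hunks above: intf->adev is set and the notifier is registered before auxiliary_device_add(), so the BUS_NOTIFY_BOUND_DRIVER event cannot be missed, and the error path unwinds both in reverse. Bus notifiers fire for every device on the bus, which is also why gsc_notifier() (added below) has to filter on the device pointer. A minimal sketch of the underlying driver-core pattern, with illustrative names (my_dev, my_nb and the registration calls in the trailing comment are not part of this patch):

#include <linux/device/bus.h>
#include <linux/notifier.h>

static struct device *my_dev;	/* the one device we care about */

static int my_bus_notifier(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct device *dev = data;	/* called for every device on the bus */

	if (dev != my_dev)
		return NOTIFY_DONE;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER:	/* a driver just bound to my_dev */
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:	/* a driver is about to unbind */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_bus_notifier };

/*
 * Register before the device can bind, unregister after it is deleted:
 *	bus_register_notifier(bus, &my_nb);
 *	...
 *	bus_unregister_notifier(bus, &my_nb);
 */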
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index f0188931d8e4..5f2144c78f8a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -10,6 +10,9 @@
 #include "intel_huc.h"
 #include "i915_drv.h"

+#include <linux/device/bus.h>
+#include <linux/mei_aux.h>
+
 /**
  * DOC: HuC
  *
@@ -42,6 +45,164 @@
  * HuC-specific commands.
  */

+/*
+ * MEI-GSC load is an async process. The probing of the exposed aux device
+ * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
+ * on when the kernel schedules it. Unless something goes terribly wrong, we're
+ * guaranteed that this happens during boot, so the big timeout is a safety net
+ * that we never expect to need.
+ * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
+ * and/or reset, this can take longer.
+ */
+#define GSC_INIT_TIMEOUT_MS 10000
+#define PXP_INIT_TIMEOUT_MS 2000
+
+static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
+				 enum i915_sw_fence_notify state)
+{
+	return NOTIFY_DONE;
+}
+
+static void __delayed_huc_load_complete(struct intel_huc *huc)
+{
+	if (!i915_sw_fence_done(&huc->delayed_load.fence))
+		i915_sw_fence_complete(&huc->delayed_load.fence);
+}
+
+static void delayed_huc_load_complete(struct intel_huc *huc)
+{
+	hrtimer_cancel(&huc->delayed_load.timer);
+	__delayed_huc_load_complete(huc);
+}
+
+static void __gsc_init_error(struct intel_huc *huc)
+{
+	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
+	__delayed_huc_load_complete(huc);
+}
+
+static void gsc_init_error(struct intel_huc *huc)
+{
+	hrtimer_cancel(&huc->delayed_load.timer);
+	__gsc_init_error(huc);
+}
+
+static void gsc_init_done(struct intel_huc *huc)
+{
+	hrtimer_cancel(&huc->delayed_load.timer);
+
+	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
+	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
+	if (!i915_sw_fence_done(&huc->delayed_load.fence))
+		hrtimer_start(&huc->delayed_load.timer,
+			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
+			      HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
+{
+	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);
+
+	if (!intel_huc_is_authenticated(huc)) {
+		drm_err(&huc_to_gt(huc)->i915->drm,
+			"timed out waiting for GSC init to load HuC\n");
+
+		__gsc_init_error(huc);
+	}
+
+	return HRTIMER_NORESTART;
+}
+
+static void huc_delayed_load_start(struct intel_huc *huc)
+{
+	ktime_t delay;
+
+	GEM_BUG_ON(intel_huc_is_authenticated(huc));
+
+	/*
+	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
+	 * do need to wait for MEI-PXP to reset & re-bind
+	 */
+	switch (huc->delayed_load.status) {
+	case INTEL_HUC_WAITING_ON_GSC:
+		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
+		break;
+	case INTEL_HUC_WAITING_ON_PXP:
+		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
+		break;
+	default:
+		gsc_init_error(huc);
+		return;
+	}
+
+	/*
+	 * This fence is always complete unless we're waiting for the
+	 * GSC device to come up to load the HuC. We arm the fence here
+	 * and complete it when we confirm, from the PXP bind callback,
+	 * that the HuC is loaded.
+	 */
+	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
+	i915_sw_fence_fini(&huc->delayed_load.fence);
+	i915_sw_fence_reinit(&huc->delayed_load.fence);
+	i915_sw_fence_await(&huc->delayed_load.fence);
+	i915_sw_fence_commit(&huc->delayed_load.fence);
+
+	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
+}
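The await/commit pair above is what keeps the re-armed fence pending: an i915_sw_fence signals when its pending count drops to zero, init/reinit start the count at one, commit drops that initial reference, and await takes an extra one that only delayed_huc_load_complete() releases. A sketch of the pattern under those assumptions (the example_* helpers are illustrative, not part of the patch):

#include "i915_sw_fence.h"

/* Re-arm a signaled fence so waiters can queue on it again. */
static void example_rearm(struct i915_sw_fence *f)
{
	i915_sw_fence_fini(f);		/* tear down the signaled fence */
	i915_sw_fence_reinit(f);	/* pending count back to 1 */
	i915_sw_fence_await(f);		/* +1: take an extra reference */
	i915_sw_fence_commit(f);	/* -1: drops only the initial ref */
	/* count is 1: the fence stays unsignaled until... */
}

static void example_signal(struct i915_sw_fence *f)
{
	i915_sw_fence_complete(f);	/* ...the extra await is dropped here */
}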
+
+static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+	struct device *dev = data;
+	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
+	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];
+
+	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
+		return 0;
+
+	switch (action) {
+	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
+		gsc_init_done(huc);
+		break;
+
+	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver failed to bind */
+	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
+		drm_info(&huc_to_gt(huc)->i915->drm,
+			 "mei driver not bound, disabling HuC load\n");
+		gsc_init_error(huc);
+		break;
+	}
+
+	return 0;
+}
+
+void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+{
+	int ret;
+
+	if (!intel_huc_is_loaded_by_gsc(huc))
+		return;
+
+	huc->delayed_load.nb.notifier_call = gsc_notifier;
+	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
+	if (ret) {
+		drm_err(&huc_to_gt(huc)->i915->drm,
+			"failed to register GSC notifier\n");
+		huc->delayed_load.nb.notifier_call = NULL;
+		gsc_init_error(huc);
+	}
+}
+
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus)
+{
+	if (!huc->delayed_load.nb.notifier_call)
+		return;
+
+	delayed_huc_load_complete(huc);
+
+	bus_unregister_notifier(bus, &huc->delayed_load.nb);
+	huc->delayed_load.nb.notifier_call = NULL;
+}
+
 void intel_huc_init_early(struct intel_huc *huc)
 {
 	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
@@ -57,6 +218,17 @@ void intel_huc_init_early(struct intel_huc *huc)
 		huc->status.mask = HUC_FW_VERIFIED;
 		huc->status.value = HUC_FW_VERIFIED;
 	}
+
+	/*
+	 * Initialize the fence to be complete, as this is expected to be
+	 * complete unless there is a delayed HuC reload in progress.
+	 */
+	i915_sw_fence_init(&huc->delayed_load.fence,
+			   sw_fence_dummy_notify);
+	i915_sw_fence_commit(&huc->delayed_load.fence);
+
+	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
 }

 #define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
@@ -122,9 +294,25 @@ void intel_huc_fini(struct intel_huc *huc)
 	if (!intel_uc_fw_is_loadable(&huc->fw))
 		return;

+	delayed_huc_load_complete(huc);
+
+	i915_sw_fence_fini(&huc->delayed_load.fence);
 	intel_uc_fw_fini(&huc->fw);
 }

+void intel_huc_suspend(struct intel_huc *huc)
+{
+	if (!intel_uc_fw_is_loadable(&huc->fw))
+		return;
+
+	/*
+	 * In the unlikely case that we're suspending before the GSC has
+	 * completed its loading sequence, just stop waiting. We'll restart
+	 * on resume.
+	 */
+	delayed_huc_load_complete(huc);
+}
+
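This diff adds intel_huc_suspend() but no caller; presumably the rest of the series wires it into the uc suspend path. Note that delayed_load.status is deliberately left untouched, so after a suspend in the INTEL_HUC_WAITING_ON_PXP state the next huc_delayed_load_start() re-arms with the shorter PXP timeout, matching the "On resume" comment in that function. A sketch of the assumed call-site (the placement in intel_uc.c is an assumption, not shown in this patch):

/* Assumed hookup in intel_uc.c (not part of this diff): */
void intel_uc_suspend(struct intel_uc *uc)
{
	/*
	 * Stop any in-flight delayed-load wait; the status field is
	 * preserved, so the wait is re-armed on resume.
	 */
	intel_huc_suspend(&uc->huc);

	/* ... existing GuC suspend flow continues here ... */
}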
 int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
 {
 	struct intel_gt *gt = huc_to_gt(huc);
@@ -136,6 +324,9 @@ int intel_huc_wait_for_auth_complete(struct intel_huc *huc)
 					huc->status.value,
 					2, 50, NULL);

+	/* mark the load process as complete even if the wait failed */
+	delayed_huc_load_complete(huc);
+
 	if (ret) {
 		drm_err(&gt->i915->drm, "HuC: Firmware not verified %d\n", ret);
 		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
@@ -239,6 +430,12 @@ int intel_huc_check_status(struct intel_huc *huc)
 	return intel_huc_is_authenticated(huc);
 }

+static bool huc_has_delayed_load(struct intel_huc *huc)
+{
+	return intel_huc_is_loaded_by_gsc(huc) &&
+	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
+}
+
 void intel_huc_update_auth_status(struct intel_huc *huc)
 {
 	if (!intel_uc_fw_is_loadable(&huc->fw))
@@ -247,6 +444,8 @@ void intel_huc_update_auth_status(struct intel_huc *huc)
 	if (intel_huc_is_authenticated(huc))
 		intel_uc_fw_change_status(&huc->fw,
 					  INTEL_UC_FIRMWARE_RUNNING);
+	else if (huc_has_delayed_load(huc))
+		huc_delayed_load_start(huc);
 }

 /**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 51f9d96a3ca3..915d281c1c72 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -7,9 +7,21 @@
 #define _INTEL_HUC_H_

 #include "i915_reg_defs.h"
+#include "i915_sw_fence.h"
 #include "intel_uc_fw.h"
 #include "intel_huc_fw.h"

+#include <linux/notifier.h>
+#include <linux/hrtimer.h>
+
+struct bus_type;
+
+enum intel_huc_delayed_load_status {
+	INTEL_HUC_WAITING_ON_GSC = 0,
+	INTEL_HUC_WAITING_ON_PXP,
+	INTEL_HUC_DELAYED_LOAD_ERROR,
+};
+
 struct intel_huc {
 	/* Generic uC firmware management */
 	struct intel_uc_fw fw;
@@ -20,17 +32,28 @@ struct intel_huc {
 		u32 mask;
 		u32 value;
 	} status;
+
+	struct {
+		struct i915_sw_fence fence;
+		struct hrtimer timer;
+		struct notifier_block nb;
+		enum intel_huc_delayed_load_status status;
+	} delayed_load;
 };

 void intel_huc_init_early(struct intel_huc *huc);
 int intel_huc_init(struct intel_huc *huc);
 void intel_huc_fini(struct intel_huc *huc);
+void intel_huc_suspend(struct intel_huc *huc);
 int intel_huc_auth(struct intel_huc *huc);
 int intel_huc_wait_for_auth_complete(struct intel_huc *huc);
 int intel_huc_check_status(struct intel_huc *huc);
 void intel_huc_update_auth_status(struct intel_huc *huc);
 bool intel_huc_is_authenticated(struct intel_huc *huc);

+void intel_huc_register_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
+void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, struct bus_type *bus);
+
 static inline int intel_huc_sanitize(struct intel_huc *huc)
 {
 	intel_uc_fw_sanitize(&huc->fw);
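Nothing in this patch consumes delayed_load.fence yet; its point is to give submission code a single object to wait on while the GSC-loaded HuC becomes usable. A hypothetical consumer could look like the following sketch (await_delayed_huc_load() and the rq->hucq wait-queue entry are illustrative, not part of this series; only i915_sw_fence_await_sw_fence() is existing API):

/*
 * Hypothetical consumer: chain a request's submit fence to the
 * delayed-load fence. The wait-queue entry (here an assumed rq->hucq
 * field) must live as long as the wait.
 */
static int await_delayed_huc_load(struct i915_request *rq,
				  struct intel_huc *huc)
{
	if (i915_sw_fence_done(&huc->delayed_load.fence))
		return 0;	/* load already resolved, nothing to wait on */

	return i915_sw_fence_await_sw_fence(&rq->submit,
					    &huc->delayed_load.fence,
					    &rq->hucq);
}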