Merge tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next

- more userptr cornercase fixes from Chris
- clean up and tune forcewake handling (Tvrtko)
- more underrun fixes from Ville, mostly for ilk to appease CI
- fix unclaimed register warnings on vlv/chv and enable the debug code to catch
  them by default (Ville)
- skl gpu hang fixes for gt3/4 (Mika Kuoppala)
- edram improvements for gen9+ (Mika again)
- clean up gpu reset corner cases (Chris)
- fix ctx/ring machine deaths on snb/ilk (Chris)
- MOCS programming for all engines (Peter Antoine)
- robustify/clean up vlv/chv irq handler (Ville)
- split gen8+ irq handlers into ack/handle phase (Ville)
- tons of bxt rpm fixes (mostly around firmware interactions), from Imre
- hook up panel fitting for dsi panels (Ville)
- more runtime PM fixes all over from Imre
- shrinker polish (Chris)
- more guc fixes from Alex Dai and Dave Gordon
- tons of bugfixes and small polish all over (but with a big focus on bxt)

* tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel: (142 commits)
  drm/i915: Update DRIVER_DATE to 20160425
  drm/i915/bxt: Explicitly clear the Turbo control register
  drm/i915: Correct the i915_frequency_info debugfs output
  drm/i915: Macros to convert PM time interval values to microseconds
  drm/i915: Make RPS EI/thresholds multiple of 25 on SNB-BDW
  drm/i915: Fake HDMI live status
  drm/i915/bxt: Force reprogramming a PHY with invalid HW state
  drm/i915/bxt: Wait for PHY1 GRC done if PHY0 was already enabled
  drm/i915/bxt: Use PHY0 GRC value for HW state verification
  drm/i915: use dev_priv directly in gen8_ppgtt_notify_vgt
  drm/i915/bxt: Enable DC5 during runtime resume
  drm/i915/bxt: Sanitize DC state tracking during system resume
  drm/i915/bxt: Don't uninit/init display core twice during system suspend/resume
  drm/i915: Inline intel_suspend_complete
  drm/i915/kbl: Don't WARN for expected secondary MISC IO power well request
  drm/i915: Fix eDP low vswing for Broadwell
  drm/i915: check for ERR_PTR from i915_gem_object_pin_map()
  drm/i915/guc: local optimisations and updating comments
  drm/i915/guc: drop cached copy of 'wq_head'
  drm/i915/guc: keep GuC doorbell & process descriptor mapped in kernel
  ...
Merged by Dave Airlie on 2016-05-04 17:25:30 +10:00 in commit fffb675106.
42 changed files with 2722 additions and 1686 deletions.

drivers/gpu/drm/i915/Kconfig

@@ -1,3 +1,20 @@
config DRM_I915_WERROR
bool "Force GCC to throw an error instead of a warning when compiling"
# As this may inadvertently break the build, only allow the user
# to shoot oneself in the foot iff they aim really hard
depends on EXPERT
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
Do not enable this unless you are writing code for the i915.ko module.
Recommended for driver developers only.
If in doubt, say "N".
config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
@@ -10,3 +27,15 @@ config DRM_I915_DEBUG
If in doubt, say "N".
config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
depends on DRM_I915_WERROR
help
Enable extra sanity checks (including BUGs) along the GEM driver
paths that may slow the system down and if hit hang the machine.
Recommended for driver developers only.
If in doubt, say "N".

drivers/gpu/drm/i915/Makefile

@@ -2,6 +2,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
# Please keep these build lists sorted!
# core driver code

drivers/gpu/drm/i915/i915_debugfs.c

@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
static const char *get_pin_flag(struct drm_i915_gem_object *obj)
static const char get_active_flag(struct drm_i915_gem_object *obj)
{
if (obj->pin_display)
return "p";
else
return " ";
return obj->active ? '*' : ' ';
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static const char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}
static const char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
case I915_TILING_Y: return "Y";
case I915_TILING_NONE: return ' ';
case I915_TILING_X: return 'X';
case I915_TILING_Y: return 'Y';
}
}
static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
static inline const char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}
static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}
static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -136,12 +143,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
lockdep_assert_held(&obj->base.dev->struct_mutex);
seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
&obj->base,
obj->active ? "*" : " ",
get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
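
A hypothetical example of the flag column built by the helpers above (illustrative, not taken from real debugfs output): an active, display-pinned, X-tiled, GGTT-bound, kernel-mapped object would be described as

    ffff88021e16e600: *pXgM  4096KiB 02 02 [ ... ]

while an idle, untiled, unbound, unmapped object prints five spaces in the flag column.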
@@ -435,6 +443,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
struct drm_i915_gem_object *obj;
struct drm_file *file;
struct i915_vma *vma;
@@ -468,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
@@ -485,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
purgeable_size += obj->base.size;
++purgeable_count;
}
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
@@ -492,6 +518,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
seq_printf(m,
"%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
pin_mapped_count, pin_mapped_purgeable_count,
pin_mapped_size, pin_mapped_purgeable_size);
seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
@@ -1216,12 +1246,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
rpprevup = I915_READ(GEN6_RP_PREV_UP);
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
if (IS_GEN9(dev))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1291,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
seq_printf(m, "RP CUR UP: %d (%dus)\n",
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
@@ -1469,12 +1499,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain;
int i;
spin_lock_irq(&dev_priv->uncore.lock);
for_each_fw_domain(fw_domain, dev_priv, i) {
for_each_fw_domain(fw_domain, dev_priv) {
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(i),
intel_uncore_forcewake_domain_to_str(fw_domain->id),
fw_domain->wake_count);
}
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -2405,10 +2434,11 @@ static int i915_llc(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool edram = INTEL_GEN(dev_priv) > 8;
/* Size calculation for LLC is a bit of a pain. Ignore for now. */
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);
return 0;
}
@@ -4723,7 +4753,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
*val = atomic_read(&dev_priv->gpu_error.reset_counter);
*val = i915_terminally_wedged(&dev_priv->gpu_error);
return 0;
}

drivers/gpu/drm/i915/i915_dma.c

@@ -257,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -325,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -343,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -356,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
u32 deven_val;
pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
&deven_val);
deven_val &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
deven_val);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
u32 mchbar_val;
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
&mchbar_val);
mchbar_val &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
mchbar_val);
}
}

drivers/gpu/drm/i915/i915_drv.c

@@ -567,10 +567,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
@@ -640,8 +639,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_display_set_init_power(dev_priv, false);
if (HAS_CSR(dev_priv))
flush_work(&dev_priv->csr.work);
intel_csr_ucode_suspend(dev_priv);
out:
enable_rpm_wakeref_asserts(dev_priv);
@@ -657,7 +655,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
disable_rpm_wakeref_asserts(dev_priv);
fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
@@ -668,7 +667,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!fw_csr)
intel_power_domains_suspend(dev_priv);
ret = intel_suspend_complete(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -732,6 +737,8 @@ static int i915_drm_resume(struct drm_device *dev)
disable_rpm_wakeref_asserts(dev_priv);
intel_csr_ucode_resume(dev_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -802,7 +809,7 @@ static int i915_drm_resume(struct drm_device *dev)
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int ret;
/*
* We have a resume ordering issue with the snd-hda driver also
@@ -813,6 +820,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/
/*
* Note that we need to set the power state explicitly, since we
* powered off the device during freeze and the PCI core won't power
* it back up for us during thaw. Powering off the device during
* freeze is not a hard requirement though, and during the
* suspend/resume phases the PCI core makes sure we get here with the
* device powered on. So in case we change our freeze logic and keep
* the device powered we can also remove the following set power state
* call.
*/
ret = pci_set_power_state(dev->pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
goto out;
}
/*
* Note that pci_enable_device() first enables any parent bridge
* device and only then sets the power state for this device. The
* bridge enabling is a nop though, since bridge devices are resumed
* first. The order of enabling power and enabling the device is
* imposed by the PCI core as described above, so here we preserve the
* same order for the freeze/thaw phases.
*
* TODO: eventually we should remove pci_disable_device() /
* pci_enable_enable_device() from suspend/resume. Due to how they
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
@@ -830,21 +867,25 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_early_sanitize(dev, true);
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (IS_BROXTON(dev)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
}
intel_uncore_sanitize(dev);
if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
out:
dev_priv->suspended_to_idle = false;
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
@@ -880,23 +921,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool simulated;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;
intel_reset_gt_powersave(dev);
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
/* Clear any previous failed attempts at recovery. Time to try again. */
atomic_andnot(I915_WEDGED, &error->reset_counter);
simulated = dev_priv->gpu_error.stop_rings != 0;
/* Clear the reset-in-progress flag and increment the reset epoch. */
reset_counter = atomic_inc_return(&error->reset_counter);
if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
ret = -EIO;
goto error;
}
i915_gem_reset(dev);
ret = intel_gpu_reset(dev, ALL_ENGINES);
/* Also reset the gpu hangman. */
if (simulated) {
if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -908,9 +958,11 @@ int i915_reset(struct drm_device *dev)
pr_notice("drm/i915: Resetting chip after gpu hang\n");
if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}
intel_overlay_reset(dev_priv);
@@ -929,20 +981,14 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;
ret = i915_gem_init_hw(dev);
dev_priv->gpu_error.reload_in_reset = false;
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
return ret;
goto error;
}
mutex_unlock(&dev->struct_mutex);
/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
@@ -953,6 +999,11 @@ int i915_reset(struct drm_device *dev)
intel_enable_gt_powersave(dev);
return 0;
error:
atomic_or(I915_WEDGED, &error->reset_counter);
mutex_unlock(&dev->struct_mutex);
return ret;
}
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1059,44 +1110,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
return 0;
}
static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
/* TODO: when DC5 support is added disable DC5 here. */
broxton_ddi_phy_uninit(dev);
broxton_uninit_cdclk(dev);
bxt_enable_dc9(dev_priv);
return 0;
}
static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
/* TODO: when CSR FW support is added make sure the FW is loaded */
bxt_disable_dc9(dev_priv);
/*
* TODO: when DC5 support is added enable DC5 here if the CSR FW
* is available.
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
return 0;
}
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1502,7 +1515,16 @@ static int intel_runtime_suspend(struct device *device)
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);
ret = intel_suspend_complete(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_enable_pc8(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_suspend_complete(dev_priv);
}
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1576,12 +1598,17 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (IS_BROXTON(dev)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_resume_prepare(dev_priv, true);
}
/*
* No point of rolling back things in case of an error, as the best
@@ -1612,26 +1639,6 @@ static int intel_runtime_resume(struct device *device)
return ret;
}
/*
* This function implements common functionality of runtime and system
* suspend sequence.
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
int ret;
if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;
return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,

drivers/gpu/drm/i915/i915_drv.h

@@ -33,34 +33,40 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include "intel_guc.h"
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160411"
#define DRIVER_DATE "20160425"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -634,6 +640,13 @@ enum forcewake_domains {
FORCEWAKE_MEDIA)
};
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -666,8 +679,9 @@ struct intel_uncore {
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned wake_count;
struct timer_list timer;
struct hrtimer timer;
i915_reg_t reg_set;
u32 val_set;
u32 val_clear;
@@ -680,14 +694,14 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(i__) < FW_DOMAIN_ID_COUNT; \
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
(domain__)++) \
for_each_if ((mask__) & (domain__)->mask)
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
#define for_each_fw_domain(domain__, dev_priv__) \
for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
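
A minimal sketch of the reworked iteration (the helper below is hypothetical; the macros and the new ->mask field are the ones defined above). Callers now filter by each domain's own mask bit instead of carrying a separate index variable:

static unsigned int total_wake_count(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int count = 0;

	/* Visit only the initialised domains selected by fw_domains. */
	for_each_fw_domain_masked(domain, fw_domains, dev_priv)
		count += domain->wake_count;

	return count;
}

To walk every initialised domain, for_each_fw_domain(domain, dev_priv) expands to the masked form with FORCEWAKE_ALL.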
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -996,6 +1010,7 @@ struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
u32 force_bit;
u32 reg0;
i915_reg_t gpio_reg;
@@ -1385,9 +1400,6 @@ struct i915_gpu_error {
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
bool reload_in_reset;
};
enum modeset_restore {
@@ -1444,6 +1456,7 @@ struct intel_vbt_data {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
@@ -1863,7 +1876,7 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
/* Cannot be determined by PCIID. You must always read a register. */
size_t ellc_size;
u32 edram_cap;
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1911,6 +1924,7 @@ struct drm_i915_private {
* crappiness (can't read out DPLL_MD for pipes B & C).
*/
u32 chv_dpll_md[I915_MAX_PIPES];
u32 bxt_phy_grc;
u32 suspend_count;
bool suspended_to_idle;
@@ -2237,6 +2251,7 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
unsigned reset_counter;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2317,7 +2332,6 @@ struct drm_i915_gem_request {
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2487,6 +2501,7 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
@@ -2613,8 +2628,9 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
__I915__(dev)->ellc_size)
HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2631,8 +2647,9 @@ struct drm_i915_cmd_table {
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
IS_SKL_GT3(dev) || \
IS_SKL_GT4(dev))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2667,7 +2684,7 @@ struct drm_i915_cmd_table {
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev))
IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
@@ -2791,6 +2808,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
@@ -2869,7 +2888,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
@@ -3000,9 +3018,11 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
* pages and then returns a contiguous mapping of the backing storage into
* the kernel address space.
*
* The caller must hold the struct_mutex.
* The caller must hold the struct_mutex, and is responsible for calling
* i915_gem_object_unpin_map() when the mapping is no longer required.
*
* Returns the pointer through which to access the backing storage.
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
*/
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
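
A minimal usage sketch for the contract documented above (the helper and its payload are hypothetical): hold struct_mutex, check for ERR_PTR() on failure, and balance the pin with i915_gem_object_unpin_map():

static int example_write_object(struct drm_i915_gem_object *obj,
				const void *src, size_t len)
{
	void *vaddr;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vaddr = i915_gem_object_pin_map(obj);	/* pins the pages and maps them */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);		/* never NULL on failure */

	memcpy(vaddr, src, len);

	i915_gem_object_unpin_map(obj);		/* drop the pin once done */
	return 0;
}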
@@ -3069,23 +3089,45 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter);
}
static inline bool __i915_reset_in_progress(u32 reset)
{
return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
}
static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
{
return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}
static inline bool __i915_terminally_wedged(u32 reset)
{
return unlikely(reset & I915_WEDGED);
}
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
return __i915_reset_in_progress(i915_reset_counter(error));
}
static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) & I915_WEDGED;
return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
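
A worked example of the counter encoding these helpers rely on (values illustrative, assuming the existing definitions of I915_RESET_IN_PROGRESS_FLAG as bit 0 and I915_WEDGED as bit 31): the counter is incremented once when a reset is queued and once more when recovery completes, so an odd value means a reset is in flight.

/*
 * reset_counter == 0           no reset so far; i915_reset_count() == 0
 * reset_counter == 1           first reset in progress
 * reset_counter == 2           first reset done; ((2 & ~I915_WEDGED) + 1) / 2 == 1
 * reset_counter == 0x80000004  second reset attempt failed and the GPU is
 *                              terminally wedged
 */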
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3118,7 +3160,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps);
@@ -3455,6 +3496,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_device *dev);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3470,6 +3512,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_device *dev)
{
return -ENODEV;
}
#endif
/* intel_acpi.c */

drivers/gpu/drm/i915/i915_gem.c

@@ -32,14 +32,13 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#define RQ_BUG_ON(expr)
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;
#define EXIT_COND (!i915_reset_in_progress(error) || \
i915_terminally_wedged(error))
if (EXIT_COND)
if (!i915_reset_in_progress(error))
return 0;
/*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
EXIT_COND,
!i915_reset_in_progress(error),
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
} else {
return 0;
}
#undef EXIT_COND
return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -1110,27 +1105,19 @@ put_rpm:
return ret;
}
int
i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
static int
i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
if (i915_reset_in_progress(error)) {
if (__i915_terminally_wedged(reset_counter))
return -EIO;
if (__i915_reset_in_progress(reset_counter)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
/* Recovery complete, but the reset failed ... */
if (i915_terminally_wedged(error))
return -EIO;
/*
* Check if GPU Reset is in progress - we need intel_ring_begin
* to work properly to reinit the hw state while the gpu is
* still marked as reset-in-progress. Handle this with a flag.
*/
if (!error->reload_in_reset)
return -EAGAIN;
return -EAGAIN;
}
return 0;
@@ -1224,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
* @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1239,7 +1225,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps)
@@ -1300,13 +1285,14 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
prepare_to_wait(&engine->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
/* ... but upgrade the -EAGAIN to an -EIO if the gpu
* is truely gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret == 0)
ret = -EAGAIN;
* the request being submitted and now. If a reset has occurred,
* the request is effectively complete (we either are in the
* process of or have discarded the rendering and completely
* reset the GPU. The results of the request are lost and we
* are free to continue on with the original operation.
*/
if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
ret = 0;
break;
}
@@ -1458,26 +1444,15 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
int
i915_wait_request(struct drm_i915_gem_request *req)
{
struct drm_device *dev;
struct drm_i915_private *dev_priv;
struct drm_i915_private *dev_priv = req->i915;
bool interruptible;
int ret;
BUG_ON(req == NULL);
dev = req->engine->dev;
dev_priv = dev->dev_private;
interruptible = dev_priv->mm.interruptible;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;
@@ -1521,7 +1496,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
i915_gem_object_retire__read(obj, i);
}
RQ_BUG_ON(obj->active);
GEM_BUG_ON(obj->active);
}
return 0;
@@ -1552,7 +1527,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
unsigned reset_counter;
int ret, i, n = 0;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1561,12 +1535,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (!obj->active)
return 0;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (readonly) {
struct drm_i915_gem_request *req;
@@ -1588,9 +1556,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
}
mutex_unlock(&dev->struct_mutex);
ret = 0;
for (i = 0; ret == 0 && i < n; i++)
ret = __i915_wait_request(requests[i], reset_counter, true,
NULL, rps);
ret = __i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -1964,11 +1932,27 @@ out:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*/
lockdep_assert_held(&obj->base.dev->struct_mutex);
if (!obj->fault_mappable)
return;
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied above by changing the PTE above *should* be
* sufficient, an extra barrier here just provides us with a bit
* of paranoid documentation about our requirement to serialise
* memory writes before touching registers / GSM.
*/
wmb();
obj->fault_mappable = false;
}
@@ -2177,11 +2161,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2470,8 +2453,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
RQ_BUG_ON(obj->last_write_req == NULL);
RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
GEM_BUG_ON(obj->last_write_req == NULL);
GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2482,8 +2465,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;
RQ_BUG_ON(obj->last_read_req[ring] == NULL);
RQ_BUG_ON(!(obj->active & (1 << ring)));
GEM_BUG_ON(obj->last_read_req[ring] == NULL);
GEM_BUG_ON(!(obj->active & (1 << ring)));
list_del_init(&obj->engine_list[ring]);
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
@@ -2743,6 +2726,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(engine->dev);
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2751,6 +2735,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
*req_out = NULL;
/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
*/
ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
if (ret)
return ret;
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
@@ -2762,6 +2754,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
kref_init(&req->ref);
req->i915 = dev_priv;
req->engine = engine;
req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
@@ -2791,7 +2784,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* fully prepared. Thus it can be cleaned up using the proper
* free code.
*/
i915_gem_request_cancel(req);
intel_ring_reserved_space_cancel(req->ringbuf);
i915_gem_request_unreference(req);
return ret;
}
@@ -2828,13 +2822,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return err ? ERR_PTR(err) : req;
}
void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);
i915_gem_request_unreference(req);
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@@ -3140,11 +3127,9 @@ retire:
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_request *req[I915_NUM_ENGINES];
unsigned reset_counter;
int i, n = 0;
int ret;
@@ -3178,7 +3163,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
@@ -3191,7 +3175,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < n; i++) {
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
@@ -3223,7 +3207,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
@@ -3344,9 +3327,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
/* Wait for any direct GTT access to complete */
mb();
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3451,12 +3431,9 @@ int i915_gpu_idle(struct drm_device *dev)
return PTR_ERR(req);
ret = i915_switch_context(req);
if (ret) {
i915_gem_request_cancel(req);
return ret;
}
i915_add_request_no_flush(req);
if (ret)
return ret;
}
ret = intel_engine_idle(engine);
@@ -4179,16 +4156,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
int ret;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
if (ret)
return ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
return -EIO;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4204,7 +4180,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);
@@ -4212,7 +4187,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
ret = __i915_wait_request(target, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@@ -4372,7 +4347,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
BUG_ON(!vma);
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
@@ -4889,7 +4863,7 @@ i915_gem_init_hw(struct drm_device *dev)
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
if (dev_priv->ellc_size)
if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (IS_HASWELL(dev))
@@ -4933,6 +4907,8 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}
intel_mocs_init_l3cc_table(dev);
/* We can't enable contexts until all firmware is loaded */
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
@@ -4958,34 +4934,33 @@ i915_gem_init_hw(struct drm_device *dev)
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_engines(dev);
goto out;
break;
}
if (engine->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
i915_gem_l3_remap(req, j);
for (j = 0; j < NUM_L3_SLICES(dev); j++) {
ret = i915_gem_l3_remap(req, j);
if (ret)
goto err_request;
}
}
ret = i915_ppgtt_init_ring(req);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable %s failed %d\n",
engine->name, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_engines(dev);
goto out;
}
if (ret)
goto err_request;
ret = i915_gem_context_enable(req);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable %s failed %d\n",
engine->name, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_engines(dev);
goto out;
}
if (ret)
goto err_request;
err_request:
i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("Failed to enable %s, error=%d\n",
engine->name, ret);
i915_gem_cleanup_engines(dev);
break;
}
}
out:

drivers/gpu/drm/i915/i915_gem.h (new file)

@@ -0,0 +1,34 @@
/*
* Copyright © 2016 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_GEM_H__
#define __I915_GEM_H__
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#else
#define GEM_BUG_ON(expr)
#endif
#endif /* __I915_GEM_H__ */
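
A minimal sketch of the intended usage (the function is hypothetical): with CONFIG_DRM_I915_DEBUG_GEM=y the macro compiles to BUG_ON() and halts on a violated invariant; otherwise it compiles away entirely, so the asserted expression must be free of side effects:

static inline void example_assert_idle(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(obj->active);	/* caller guarantees the object is idle */
}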

drivers/gpu/drm/i915/i915_gem_context.c

@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev)
struct intel_context *ctx;
list_for_each_entry(ctx, &dev_priv->context_list, link)
intel_lr_context_reset(dev, ctx);
intel_lr_context_reset(dev_priv, ctx);
}
for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -539,7 +539,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
len = 4;
if (INTEL_INFO(engine->dev)->gen >= 7)
len += 2 + (num_rings ? 4*num_rings + 2 : 0);
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
ret = intel_ring_begin(req, len);
if (ret)
@@ -579,6 +579,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (INTEL_INFO(engine->dev)->gen >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
intel_ring_emit(engine,
MI_LOAD_REGISTER_IMM(num_rings));
@@ -586,11 +587,19 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (signaller == engine)
continue;
intel_ring_emit_reg(engine,
RING_PSMI_CTL(signaller->mmio_base));
last_reg = RING_PSMI_CTL(signaller->mmio_base);
intel_ring_emit_reg(engine, last_reg);
intel_ring_emit(engine,
_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
}
/* Insert a delay before the next switch! */
intel_ring_emit(engine,
MI_STORE_REGISTER_MEM |
MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit_reg(engine, last_reg);
intel_ring_emit(engine, engine->scratch.gtt_offset);
intel_ring_emit(engine, MI_NOOP);
}
intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
}
@@ -600,50 +609,48 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
return ret;
}
static inline bool should_skip_switch(struct intel_engine_cs *engine,
struct intel_context *from,
struct intel_context *to)
static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
struct intel_context *to)
{
if (to->remap_slice)
return false;
if (to->ppgtt && from == to &&
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return true;
if (!to->legacy_hw_ctx.initialized)
return false;
return false;
if (to->ppgtt &&
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return false;
return to == engine->last_context;
}
static bool
needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!to->ppgtt)
return false;
if (INTEL_INFO(engine->dev)->gen < 8)
if (engine->last_context == to &&
!(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
return false;
if (engine->id != RCS)
return true;
if (engine != &dev_priv->engine[RCS])
if (INTEL_INFO(engine->dev)->gen < 8)
return true;
return false;
}
static bool
needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
u32 hw_flags)
needs_pd_load_post(struct intel_context *to, u32 hw_flags)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
if (!to->ppgtt)
return false;
if (!IS_GEN8(engine->dev))
return false;
if (engine != &dev_priv->engine[RCS])
if (!IS_GEN8(to->i915))
return false;
if (hw_flags & MI_RESTORE_INHIBIT)
@@ -652,60 +659,33 @@ needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
return false;
}
static int do_switch(struct drm_i915_gem_request *req)
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
struct intel_context *to = req->ctx;
struct intel_engine_cs *engine = req->engine;
struct drm_i915_private *dev_priv = req->i915;
struct intel_context *from = engine->last_context;
u32 hw_flags = 0;
bool uninitialized = false;
struct intel_context *from;
u32 hw_flags;
int ret, i;
if (from != NULL && engine == &dev_priv->engine[RCS]) {
BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
}
if (should_skip_switch(engine, from, to))
if (skip_rcs_switch(engine, to))
return 0;
/* Trying to pin first makes error handling easier. */
if (engine == &dev_priv->engine[RCS]) {
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(engine->dev),
0);
if (ret)
return ret;
}
ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
get_context_alignment(engine->dev),
0);
if (ret)
return ret;
/*
* Pin can switch back to the default context if we end up calling into
* evict_everything - as a last ditch gtt defrag effort that also
* switches to the default context. Hence we need to reload from here.
*
* XXX: Doing so is painfully broken!
*/
from = engine->last_context;
if (needs_pd_load_pre(engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
/* Doing a PD load always reloads the page dirs */
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (engine != &dev_priv->engine[RCS]) {
if (from)
i915_gem_context_unreference(from);
goto done;
}
/*
* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
@@ -718,53 +698,37 @@ static int do_switch(struct drm_i915_gem_request *req)
if (ret)
goto unpin_out;
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
hw_flags |= MI_RESTORE_INHIBIT;
if (needs_pd_load_pre(engine, to)) {
/* Older GENs and non render rings still want the load first,
* "PP_DCLV followed by PP_DIR_BASE register through Load
* Register Immediate commands in Ring Buffer before submitting
* a context."*/
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
goto unpin_out;
}
if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
/* NB: If we inhibit the restore, the context is not allowed to
* die because future work may end up depending on valid address
* space. This means we must enforce that a page table load
* occur when this occurs. */
} else if (to->ppgtt &&
(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
hw_flags |= MI_FORCE_RESTORE;
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
hw_flags = MI_RESTORE_INHIBIT;
else if (to->ppgtt &&
intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
hw_flags = MI_FORCE_RESTORE;
else
hw_flags = 0;
/* We should never emit switch_mm more than once */
WARN_ON(needs_pd_load_pre(engine, to) &&
needs_pd_load_post(engine, to, hw_flags));
needs_pd_load_post(to, hw_flags));
ret = mi_set_context(req, hw_flags);
if (ret)
goto unpin_out;
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
if (needs_pd_load_post(engine, to, hw_flags)) {
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
* happened.
*/
if (ret) {
DRM_ERROR("Failed to change address space on context switch\n");
goto unpin_out;
}
}
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(req, i);
/* If it failed, try again next round */
if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
ret = mi_set_context(req, hw_flags);
if (ret)
DRM_DEBUG_DRIVER("L3 remapping failed\n");
else
to->remap_slice &= ~(1<<i);
goto unpin_out;
}
/* The backing object for the context is done after switching to the
@@ -789,27 +753,51 @@ static int do_switch(struct drm_i915_gem_request *req)
i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
i915_gem_context_unreference(from);
}
uninitialized = !to->legacy_hw_ctx.initialized;
to->legacy_hw_ctx.initialized = true;
done:
i915_gem_context_reference(to);
engine->last_context = to;
if (uninitialized) {
/* GEN8 does *not* require an explicit reload if the PDPs have been
* setup, and we do not wish to move them.
*/
if (needs_pd_load_post(to, hw_flags)) {
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
/* The hardware context switch is emitted, but we haven't
* actually changed the state - so it's probably safe to bail
* here. Still, let the user know something dangerous has
* happened.
*/
if (ret)
return ret;
}
if (to->ppgtt)
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
for (i = 0; i < MAX_L3_SLICES; i++) {
if (!(to->remap_slice & (1<<i)))
continue;
ret = i915_gem_l3_remap(req, i);
if (ret)
return ret;
to->remap_slice &= ~(1<<i);
}
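/*
 * Editor's note (illustrative): remap_slice is also a bitmask, one
 * bit per L3 slice whose remapping registers need rewriting. E.g.
 * remap_slice = 0b11 walks slices 0 and 1. In the old flow a failed
 * i915_gem_l3_remap() left the bit set for a later retry; the new
 * flow above returns the error and keeps the bit set.
 */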
if (!to->legacy_hw_ctx.initialized) {
if (engine->init_context) {
ret = engine->init_context(req);
if (ret)
DRM_ERROR("ring init context: %d\n", ret);
return ret;
}
to->legacy_hw_ctx.initialized = true;
}
return 0;
unpin_out:
if (engine->id == RCS)
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
return ret;
}
@ -834,17 +822,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
WARN_ON(i915.enable_execlists);
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
if (req->ctx != engine->last_context) {
i915_gem_context_reference(req->ctx);
if (engine->id != RCS ||
req->ctx->legacy_hw_ctx.rcs_state == NULL) {
struct intel_context *to = req->ctx;
if (needs_pd_load_pre(engine, to)) {
int ret;
trace_switch_mm(engine, to);
ret = to->ppgtt->switch_mm(to->ppgtt, req);
if (ret)
return ret;
/* Doing a PD load always reloads the page dirs */
to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
}
if (to != engine->last_context) {
i915_gem_context_reference(to);
if (engine->last_context)
i915_gem_context_unreference(engine->last_context);
engine->last_context = req->ctx;
engine->last_context = to;
}
return 0;
}
return do_switch(req);
return do_rcs_switch(req);
}
static bool contexts_enabled(struct drm_device *dev)

View File

@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
}
}
void
static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
{
/* Unconditionally force add_request to emit a full flush. */
@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);
return 0;
}
@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_request_add_to_client(req, file);
if (ret)
goto err_batch_unpin;
goto err_request;
/*
* Save assorted stuff away to pass through to *_submission().
@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->request = req;
ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
err_request:
i915_gem_execbuffer_retire_commands(params);
err_batch_unpin:
/*
@ -1657,14 +1658,6 @@ err:
i915_gem_context_unreference(ctx);
eb_destroy(eb);
/*
* If the request was created but not successfully submitted then it
* must be freed again. If it was submitted then it is being tracked
* on the active request list and no clean up is required here.
*/
if (ret && !IS_ERR_OR_NULL(req))
i915_gem_request_cancel(req);
mutex_unlock(&dev->struct_mutex);
pre_mutex_err:

View File

@ -745,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
num_entries--;
}
kunmap_px(ppgtt, pt);
kunmap_px(ppgtt, pt_vaddr);
pte = 0;
if (++pde == I915_PDES) {
@ -905,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
enum vgt_g2v_type msg;
struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
int i;
if (USES_FULL_48BIT_PPGTT(dev)) {
if (USES_FULL_48BIT_PPGTT(dev_priv)) {
u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@ -3172,7 +3171,8 @@ int i915_ggtt_init_hw(struct drm_device *dev)
} else if (INTEL_INFO(dev)->gen < 8) {
ggtt->probe = gen6_gmch_probe;
ggtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
if (HAS_EDRAM(dev))
ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
ggtt->base.pte_encode = hsw_pte_encode;

View File

@ -70,6 +70,10 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
/* Only shmemfs objects are backed by swap */
if (!obj->base.filp)
return false;
/* Only report true if by unbinding the object and putting its pages
* we can actually make forward progress towards freeing physical
* pages.
@ -336,7 +340,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct shrinker_lock_uninterruptible slu;
struct drm_i915_gem_object *obj;
unsigned long pinned, bound, unbound, freed_pages;
unsigned long unevictable, bound, unbound, freed_pages;
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
@ -347,33 +351,28 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
* assert that there are no objects with pinned pages that are not
* being pointed to by hardware.
*/
unbound = bound = pinned = 0;
unbound = bound = unevictable = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
if (!obj->base.filp) /* not backed by a freeable object */
continue;
if (obj->pages_pin_count)
pinned += obj->base.size;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
unbound += obj->base.size;
unbound += obj->base.size >> PAGE_SHIFT;
}
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (!obj->base.filp)
continue;
if (obj->pages_pin_count)
pinned += obj->base.size;
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
bound += obj->base.size;
bound += obj->base.size >> PAGE_SHIFT;
}
i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
if (freed_pages || unbound || bound)
pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
freed_pages << PAGE_SHIFT, pinned);
pr_info("Purging GPU memory, %lu pages freed, "
"%lu pages still pinned.\n",
freed_pages, unevictable);
if (unbound || bound)
pr_err("%lu and %lu bytes still available in the "
pr_err("%lu and %lu pages still available in the "
"bound and unbound GPU page lists.\n",
bound, unbound);

View File

@ -95,9 +95,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
u32 base;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at offset 0x5c in the igfx configuration space. On a few (desktop)
* machines this is also mirrored in the bridge device at different
* locations, or in the MCHBAR.
* at register BSM (0x5c) in the igfx configuration space. On a few
* (desktop) machines this is also mirrored in the bridge device at
* different locations, or in the MCHBAR.
*
* On 865 we just check the TOUD register.
*
@ -107,9 +107,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
base = 0;
if (INTEL_INFO(dev)->gen >= 3) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
u32 bsm;
pci_read_config_dword(dev->pdev, BSM, &bsm);
base = bsm & BSM_MASK;
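/*
 * Editor's sketch with a made-up value: BSM_MASK keeps only the
 * high bits of the dword (1 MiB granularity), so e.g.
 * bsm = 0x7b000001 -> base = bsm & BSM_MASK = 0x7b000000,
 * discarding the low control bits.
 */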
} else if (IS_I865G(dev)) {
u16 toud = 0;

View File

@ -34,7 +34,7 @@
struct i915_mm_struct {
struct mm_struct *mm;
struct drm_device *dev;
struct drm_i915_private *i915;
struct i915_mmu_notifier *mn;
struct hlist_node node;
struct kref kref;
@ -49,6 +49,7 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root objects;
struct workqueue_struct *wq;
};
struct i915_mmu_object {
@ -60,6 +61,37 @@ struct i915_mmu_object {
bool attached;
};
static void wait_rendering(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
int i, n;
if (!obj->active)
return;
n = 0;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct drm_i915_gem_request *req;
req = obj->last_read_req[i];
if (req == NULL)
continue;
requests[n++] = i915_gem_request_reference(req);
}
mutex_unlock(&dev->struct_mutex);
for (i = 0; i < n; i++)
__i915_wait_request(requests[i], false, NULL, NULL);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++)
i915_gem_request_unreference(requests[i]);
}
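/*
 * Editor's note: wait_rendering() above is the usual i915
 * wait-without-the-lock dance - grab references to the outstanding
 * requests under struct_mutex, drop the mutex so the waits cannot
 * deadlock anyone who needs it to make forward progress, then
 * retake it and drop the references.
 */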
static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
struct i915_vma *vma, *tmp;
bool was_interruptible;
wait_rendering(obj);
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
int ret = i915_vma_unbind(vma);
WARN_ON(ret && ret != -EIO);
}
list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
WARN_ON(i915_vma_unbind(vma));
WARN_ON(i915_gem_object_put_pages(obj));
dev_priv->mm.interruptible = was_interruptible;
@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
*/
mo = container_of(it, struct i915_mmu_object, it);
if (kref_get_unless_zero(&mo->obj->base.refcount))
schedule_work(&mo->work);
queue_work(mn->wq, &mo->work);
list_add(&mo->link, &cancelled);
it = interval_tree_iter_next(it, start, end);
@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
list_for_each_entry(mo, &cancelled, link)
del_object(mo);
spin_unlock(&mn->lock);
flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT;
mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
}
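/*
 * Editor's note: a dedicated WQ_UNBOUND workqueue means the
 * flush_workqueue() in invalidate_range_start() (see the hunk
 * above) waits only for the queued userptr cancellations, not for
 * unrelated work that would sit on the system workqueue.
 */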
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mn->mn, mm);
if (ret) {
destroy_workqueue(mn->wq);
kfree(mn);
return ERR_PTR(ret);
}
@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
return mn;
down_write(&mm->mm->mmap_sem);
mutex_lock(&to_i915(mm->dev)->mm_lock);
mutex_lock(&mm->i915->mm_lock);
if ((mn = mm->mn) == NULL) {
mn = i915_mmu_notifier_create(mm->mm);
if (!IS_ERR(mn))
mm->mn = mn;
}
mutex_unlock(&to_i915(mm->dev)->mm_lock);
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
return mn;
@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
destroy_workqueue(mn->wq);
kfree(mn);
}
@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
}
kref_init(&mm->kref);
mm->dev = obj->base.dev;
mm->i915 = to_i915(obj->base.dev);
mm->mm = current->mm;
atomic_inc(&current->mm->mm_count);
@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
/* Protected by dev_priv->mm_lock */
hash_del(&mm->node);
mutex_unlock(&to_i915(mm->dev)->mm_lock);
mutex_unlock(&mm->i915->mm_lock);
INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
schedule_work(&mm->work);
@ -498,19 +539,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
!obj->userptr.read_only, 0,
pvec + pinned, NULL);
if (ret < 0)
break;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
down_read(&mm->mmap_sem);
while (pinned < npages) {
ret = get_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
!obj->userptr.read_only, 0,
pvec + pinned, NULL);
if (ret < 0)
break;
pinned += ret;
pinned += ret;
}
up_read(&mm->mmap_sem);
mmput(mm);
}
up_read(&mm->mmap_sem);
}
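/*
 * Editor's note: atomic_inc_not_zero(&mm->mm_users) succeeds only
 * while the mm still has a real user, so the worker never pins
 * pages through an address space that is already being torn down;
 * the matching mmput() drops the temporary reference. On failure
 * ret keeps the -EFAULT set above.
 */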
mutex_lock(&dev->struct_mutex);

View File

@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_doorbell_info *doorbell;
void *base;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
doorbell = client->client_base + client->doorbell_offset;
doorbell->db_status = 1;
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = 0;
kunmap_atomic(base);
}
static int guc_ring_doorbell(struct i915_guc_client *gc)
@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
struct guc_process_desc *desc;
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
void *base;
int attempt = 2, ret = -EAGAIN;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
desc = gc->client_base + gc->proc_desc_offset;
/* Update the tail so it is visible to GuC */
desc->tail = gc->wq_tail;
@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
db = base + gc->doorbell_offset;
db = gc->client_base + gc->doorbell_offset;
while (attempt--) {
/* lets ring the doorbell */
@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
db_exc.cookie = 1;
}
/* Finally, update the cached copy of the GuC's WQ head */
gc->wq_head = desc->head;
kunmap_atomic(base);
return ret;
}
@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct guc_doorbell_info *doorbell;
void *base;
i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
int value;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
doorbell = base + client->doorbell_offset;
doorbell = client->client_base + client->doorbell_offset;
doorbell->db_status = 0;
kunmap_atomic(base);
doorbell->db_status = GUC_DOORBELL_DISABLED;
I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct guc_process_desc *desc;
void *base;
base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
desc = base + client->proc_desc_offset;
desc = client->client_base + client->proc_desc_offset;
memset(desc, 0, sizeof(*desc));
@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
kunmap_atomic(base);
}
/*
@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
static void guc_init_ctx_desc(struct intel_guc *guc,
struct i915_guc_client *client)
{
struct drm_i915_gem_object *client_obj = client->client_obj;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct intel_context *ctx = client->owner;
struct guc_context_desc desc;
struct sg_table *sg;
enum intel_engine_id id;
u32 gfx_addr;
memset(&desc, 0, sizeof(desc));
@ -410,16 +394,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
lrc->context_desc = (u32)ctx_desc;
/* The state page is after PPHWSP */
lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
LRC_STATE_PN * PAGE_SIZE;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
obj = ctx->engine[id].ringbuf->obj;
gfx_addr = i915_gem_obj_ggtt_offset(obj);
lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_begin = gfx_addr;
lrc->ring_end = gfx_addr + obj->base.size - 1;
lrc->ring_next_free_location = gfx_addr;
lrc->ring_current_tail_pointer_value = 0;
desc.engines_used |= (1 << engine->guc_id);
@ -428,22 +413,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
WARN_ON(desc.engines_used == 0);
/*
* The CPU address is only needed at certain points, so kmap_atomic on
* demand instead of storing it in the ctx descriptor.
* XXX: May make debug easier to have it mapped
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
desc.db_trigger_cpu = 0;
desc.db_trigger_uk = client->doorbell_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
desc.db_trigger_phy = client->doorbell_offset +
sg_dma_address(client->client_obj->pages->sgl);
desc.process_desc = client->proc_desc_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
desc.wq_addr = client->wq_offset +
i915_gem_obj_ggtt_offset(client->client_obj);
gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
client->doorbell_offset;
desc.db_trigger_cpu = (uintptr_t)client->client_base +
client->doorbell_offset;
desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
desc.process_desc = gfx_addr + client->proc_desc_offset;
desc.wq_addr = gfx_addr + client->wq_offset;
desc.wq_size = client->wq_size;
/*
@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
int i915_guc_wq_check_space(struct i915_guc_client *gc)
{
struct guc_process_desc *desc;
void *base;
u32 size = sizeof(struct guc_wq_item);
int ret = -ETIMEDOUT, timeout_counter = 200;
if (!gc)
return 0;
/* Quickly return if wq space is available since last time we cache the
* head position. */
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
return 0;
base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
desc = base + gc->proc_desc_offset;
desc = gc->client_base + gc->proc_desc_offset;
while (timeout_counter-- > 0) {
gc->wq_head = desc->head;
if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
ret = 0;
break;
}
@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
usleep_range(1000, 2000);
};
kunmap_atomic(base);
return ret;
}
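/*
 * Editor's worked example, assuming the linux/circ_buf.h definition
 * CIRC_SPACE(head, tail, size) = ((tail) - ((head) + 1)) & ((size) - 1):
 * with wq_size = 4096, wq_tail = 1024 (producer) and a freshly read
 * desc->head = 512 (consumer),
 * space = (512 - 1025) & 4095 = 3583 bytes.
 * Rereading desc->head each pass is what lets the cached wq_head
 * copy be dropped without ever reporting stale free space.
 */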
static int guc_add_workqueue_item(struct i915_guc_client *gc,
struct drm_i915_gem_request *rq)
{
struct guc_process_desc *desc;
struct guc_wq_item *wqi;
void *base;
u32 tail, wq_len, wq_off, space;
space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
desc = gc->client_base + gc->proc_desc_offset;
space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
if (WARN_ON(space < sizeof(struct guc_wq_item)))
return -ENOSPC; /* shouldn't happen */
@ -661,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
if (!client)
return;
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
/*
* First disable the doorbell, then tell the GuC we've
* finished with it, finally deallocate it in our bitmap
*/
guc_disable_doorbell(guc, client);
host2guc_release_doorbell(guc, client);
release_doorbell(guc, client->doorbell_id);
}
/*
* XXX: wait for any outstanding submissions before freeing memory.
* Be sure to drop any locks
*/
if (client->client_base) {
/*
* If we got as far as setting up a doorbell, make sure
* we shut it down before unmapping & deallocating the
* memory. So first disable the doorbell, then tell the
* GuC that we've finished with it, finally deallocate
* it in our bitmap
*/
if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
guc_disable_doorbell(guc, client);
host2guc_release_doorbell(guc, client);
release_doorbell(guc, client->doorbell_id);
}
kunmap(kmap_to_page(client->client_base));
}
gem_release_guc_obj(client->client_obj);
if (client->ctx_index != GUC_INVALID_CTX_ID) {
@ -696,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
* @ctx: the context that owns the client (we use the default render
* context)
*
* Return: An i915_guc_client object if success.
* Return: An i915_guc_client object on success, otherwise NULL.
*/
static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
uint32_t priority,
@ -728,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
if (!obj)
goto err;
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->client_obj = obj;
client->client_base = kmap(i915_gem_object_get_page(obj, 0));
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
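/*
 * Editor's note on the layout set up above: page 0 of client_obj
 * (doorbell + process descriptor) stays permanently kmap'd via
 * client_base, while the work queue follows at wq_offset =
 * GUC_DB_SIZE for wq_size = GUC_WQ_SIZE bytes; the GuC itself
 * reaches all of it through the GGTT, not this CPU mapping.
 */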

View File

@ -1264,18 +1264,17 @@ out:
mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
u32 iir)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!HAS_L3_DPF(dev))
if (!HAS_L3_DPF(dev_priv))
return;
spin_lock(&dev_priv->irq_lock);
gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
spin_unlock(&dev_priv->irq_lock);
iir &= GT_PARITY_ERROR(dev);
iir &= GT_PARITY_ERROR(dev_priv);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
dev_priv->l3_parity.which_slice |= 1 << 1;
@ -1285,8 +1284,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir &
@ -1296,8 +1294,7 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
notify_ring(&dev_priv->engine[VCS]);
}
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
@ -1314,8 +1311,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(dev))
ivybridge_parity_error_irq_handler(dev, gt_iir);
if (gt_iir & GT_PARITY_ERROR(dev_priv))
ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}
static __always_inline void
@ -1327,60 +1324,45 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
tasklet_schedule(&engine->irq_tasklet);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 master_ctl)
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
u32 master_ctl,
u32 gt_iir[4])
{
irqreturn_t ret = IRQ_NONE;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(0), iir);
gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
if (gt_iir[0]) {
I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
ret = IRQ_HANDLED;
gen8_cs_irq_handler(&dev_priv->engine[RCS],
iir, GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[BCS],
iir, GEN8_BCS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(1), iir);
gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
if (gt_iir[1]) {
I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
ret = IRQ_HANDLED;
gen8_cs_irq_handler(&dev_priv->engine[VCS],
iir, GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VCS2],
iir, GEN8_VCS2_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
if (iir) {
I915_WRITE_FW(GEN8_GT_IIR(3), iir);
gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
if (gt_iir[3]) {
I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
ret = IRQ_HANDLED;
gen8_cs_irq_handler(&dev_priv->engine[VECS],
iir, GEN8_VECS_IRQ_SHIFT);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
if (master_ctl & GEN8_GT_PM_IRQ) {
u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
if (iir & dev_priv->pm_rps_events) {
gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
if (gt_iir[2] & dev_priv->pm_rps_events) {
I915_WRITE_FW(GEN8_GT_IIR(2),
iir & dev_priv->pm_rps_events);
gt_iir[2] & dev_priv->pm_rps_events);
ret = IRQ_HANDLED;
gen6_rps_irq_handler(dev_priv, iir);
} else
DRM_ERROR("The master control interrupt lied (PM)!\n");
}
@ -1388,6 +1370,31 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
return ret;
}
static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir[4])
{
if (gt_iir[0]) {
gen8_cs_irq_handler(&dev_priv->engine[RCS],
gt_iir[0], GEN8_RCS_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[BCS],
gt_iir[0], GEN8_BCS_IRQ_SHIFT);
}
if (gt_iir[1]) {
gen8_cs_irq_handler(&dev_priv->engine[VCS],
gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
gen8_cs_irq_handler(&dev_priv->engine[VCS2],
gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
}
if (gt_iir[3])
gen8_cs_irq_handler(&dev_priv->engine[VECS],
gt_iir[3], GEN8_VECS_IRQ_SHIFT);
if (gt_iir[2] & dev_priv->pm_rps_events)
gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}
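/*
 * Editor's sketch of the resulting calling convention (mirroring
 * the gen8_irq_handler() hunk later in this diff):
 *
 *	u32 gt_iir[4] = {};
 *	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
 *	gen8_gt_irq_handler(dev_priv, gt_iir);
 *
 * The ack phase only reads and clears the IIRs while the master
 * interrupt is still masked; the per-engine handling then runs
 * afterwards from the saved copies.
 */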
static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
switch (port) {
@ -1644,10 +1651,10 @@ static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
u32 pipe_stats[I915_MAX_PIPES])
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pipe_stats[I915_MAX_PIPES] = { };
int pipe;
spin_lock(&dev_priv->irq_lock);
@ -1701,6 +1708,13 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
I915_WRITE(reg, pipe_stats[pipe]);
}
spin_unlock(&dev_priv->irq_lock);
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev,
u32 pipe_stats[I915_MAX_PIPES])
{
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
@ -1723,22 +1737,21 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
if (hotplug_status)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
return hotplug_status;
}
static void i9xx_hpd_irq_handler(struct drm_device *dev,
u32 hotplug_status)
{
u32 pin_mask = 0, long_mask = 0;
if (!hotplug_status)
return;
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
/*
* Make sure hotplug status is cleared before we clear IIR, or else we
* may miss hotplug events.
*/
POSTING_READ(PORT_HOTPLUG_STAT);
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
@ -1768,59 +1781,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
while (true) {
/* Find, clear, then process each source of interrupt */
gt_iir = I915_READ(GTIIR);
if (gt_iir)
I915_WRITE(GTIIR, gt_iir);
pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir)
I915_WRITE(GEN6_PMIIR, pm_iir);
iir = I915_READ(VLV_IIR);
if (iir) {
/* Consume port before clearing IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
I915_WRITE(VLV_IIR, iir);
}
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
ret = IRQ_HANDLED;
if (gt_iir)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
/* Call regardless, as some status bits might not be
* signalled in iir */
valleyview_pipestat_irq_handler(dev, iir);
}
out:
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl, iir;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
@ -1830,6 +1790,95 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
disable_rpm_wakeref_asserts(dev_priv);
do {
u32 iir, gt_iir, pm_iir;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
iir = I915_READ(VLV_IIR);
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
break;
ret = IRQ_HANDLED;
/*
* Theory on interrupt generation, based on empirical evidence:
*
* x = ((VLV_IIR & VLV_IER) ||
* (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
* (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
*
* A CPU interrupt will only be raised when 'x' has a 0->1 edge.
* Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
* guarantee the CPU interrupt will be raised again even if we
* don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
* bits this time around.
*/
I915_WRITE(VLV_MASTER_IER, 0);
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
if (gt_iir)
I915_WRITE(GTIIR, gt_iir);
if (pm_iir)
I915_WRITE(GEN6_PMIIR, pm_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
* signalled in iir */
valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
/*
* VLV_IIR is single buffered, and reflects the level
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
I915_WRITE(VLV_IIR, iir);
I915_WRITE(VLV_IER, ier);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
if (gt_iir)
snb_gt_irq_handler(dev_priv, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev, hotplug_status);
valleyview_pipestat_irq_handler(dev, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
irqreturn_t ret = IRQ_NONE;
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(dev_priv);
do {
u32 master_ctl, iir;
u32 gt_iir[4] = {};
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
iir = I915_READ(VLV_IIR);
@ -1838,25 +1887,49 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
/*
* Theory on interrupt generation, based on empirical evidence:
*
* x = ((VLV_IIR & VLV_IER) ||
* ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
* (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
*
* A CPU interrupt will only be raised when 'x' has a 0->1 edge.
* Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
* guarantee the CPU interrupt will be raised again even if we
* don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
* bits this time around.
*/
I915_WRITE(GEN8_MASTER_IRQ, 0);
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
if (iir) {
/* Consume port before clearing IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
I915_WRITE(VLV_IIR, iir);
}
gen8_gt_irq_handler(dev_priv, master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
/* Call regardless, as some status bits might not be
* signalled in iir */
valleyview_pipestat_irq_handler(dev, iir);
valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
/*
* VLV_IIR is single buffered, and reflects the level
* from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
*/
if (iir)
I915_WRITE(VLV_IIR, iir);
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_handler(dev_priv, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev, hotplug_status);
valleyview_pipestat_irq_handler(dev, pipe_stats);
} while (0);
enable_rpm_wakeref_asserts(dev_priv);
@ -2217,9 +2290,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_INFO(dev)->gen >= 6)
snb_gt_irq_handler(dev, dev_priv, gt_iir);
snb_gt_irq_handler(dev_priv, gt_iir);
else
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
ilk_gt_irq_handler(dev_priv, gt_iir);
}
de_iir = I915_READ(DEIIR);
@ -2419,6 +2492,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_device *dev = arg;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 master_ctl;
u32 gt_iir[4] = {};
irqreturn_t ret;
if (!intel_irqs_enabled(dev_priv))
@ -2435,7 +2509,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
disable_rpm_wakeref_asserts(dev_priv);
/* Find, clear, then process each source of interrupt */
ret = gen8_gt_irq_handler(dev_priv, master_ctl);
ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
gen8_gt_irq_handler(dev_priv, gt_iir);
ret |= gen8_de_irq_handler(dev_priv, master_ctl);
I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
@ -2483,7 +2558,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
static void i915_reset_and_wakeup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_gpu_error *error = &dev_priv->gpu_error;
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
@ -2501,7 +2575,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
reset_event);
@ -2529,25 +2603,9 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
* counter and wake up everyone waiting for the reset to
* complete.
*
* Since unlock operations are a one-sided barrier only,
* we need to insert a barrier here to order any seqno
* updates before
* the counter increment.
*/
smp_mb__before_atomic();
atomic_inc(&dev_priv->gpu_error.reset_counter);
if (ret == 0)
kobject_uevent_env(&dev->primary->kdev->kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_or(I915_WEDGED, &error->reset_counter);
}
/*
* Note: The wake_up also serves as a memory barrier so that
@ -3285,6 +3343,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev)
GEN5_IRQ_RESET(GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(dev_priv, pipe) {
I915_WRITE(PIPESTAT(pipe),
PIPE_FIFO_UNDERRUN_STATUS |
PIPESTAT_INT_STATUS_MASK);
dev_priv->pipestat_irq_mask[pipe] = 0;
}
GEN5_IRQ_RESET(VLV_);
dev_priv->irq_mask = ~0;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
enable_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
WARN_ON(dev_priv->irq_mask != ~0);
dev_priv->irq_mask = ~enable_mask;
GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
*/
static void ironlake_irq_reset(struct drm_device *dev)
@ -3302,34 +3409,19 @@ static void ironlake_irq_reset(struct drm_device *dev)
ibx_irq_reset(dev);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
GEN5_IRQ_RESET(VLV_);
}
static void valleyview_irq_preinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
vlv_display_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@ -3402,9 +3494,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
vlv_display_irq_reset(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
@ -3651,74 +3744,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
enum pipe pipe;
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask &= ~iir_mask;
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
POSTING_READ(VLV_IMR);
}
static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
u32 pipestat_mask;
u32 iir_mask;
enum pipe pipe;
iir_mask = I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
if (IS_CHERRYVIEW(dev_priv))
iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
dev_priv->irq_mask |= iir_mask;
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IIR, iir_mask);
I915_WRITE(VLV_IIR, iir_mask);
POSTING_READ(VLV_IIR);
pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
PIPE_CRC_DONE_INTERRUPT_STATUS;
i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
for_each_pipe(dev_priv, pipe)
i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
pipestat_mask = PIPESTAT_INT_STATUS_MASK |
PIPE_FIFO_UNDERRUN_STATUS;
for_each_pipe(dev_priv, pipe)
I915_WRITE(PIPESTAT(pipe), pipestat_mask);
POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
assert_spin_locked(&dev_priv->irq_lock);
@ -3728,8 +3753,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = true;
if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_install(dev_priv);
if (intel_irqs_enabled(dev_priv)) {
vlv_display_irq_reset(dev_priv);
vlv_display_irq_postinstall(dev_priv);
}
}
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
@ -3742,45 +3769,23 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
if (intel_irqs_enabled(dev_priv))
valleyview_display_irqs_uninstall(dev_priv);
vlv_display_irq_reset(dev_priv);
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
dev_priv->irq_mask = ~0;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
POSTING_READ(VLV_IMR);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_install(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
vlv_display_irq_postinstall(dev_priv);
gen5_gt_irq_postinstall(dev);
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
POSTING_READ(VLV_MASTER_IER);
return 0;
}
@ -3791,7 +3796,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
@ -3803,6 +3807,9 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
if (HAS_L3_DPF(dev_priv))
gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
@ -3870,7 +3877,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
ibx_irq_postinstall(dev);
I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@ -3880,11 +3887,14 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
vlv_display_irq_postinstall(dev_priv);
gen8_gt_irq_postinstall(dev_priv);
I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_postinstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
POSTING_READ(GEN8_MASTER_IRQ);
return 0;
@ -3900,20 +3910,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)
gen8_irq_reset(dev);
}
static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
valleyview_display_irqs_uninstall(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
vlv_display_irq_reset(dev_priv);
dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -3922,12 +3918,16 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
return;
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
gen5_gt_irq_reset(dev);
I915_WRITE(HWSTAM, 0xffffffff);
vlv_display_irq_uninstall(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void cherryview_irq_uninstall(struct drm_device *dev)
@ -3944,7 +3944,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
GEN5_IRQ_RESET(GEN8_PCU_);
vlv_display_irq_uninstall(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
vlv_display_irq_reset(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
}
static void ironlake_irq_uninstall(struct drm_device *dev)
@ -4271,8 +4274,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
/* Consume port. Then clear IIR or we'll miss events */
if (I915_HAS_HOTPLUG(dev) &&
iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
i9xx_hpd_irq_handler(dev, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
@ -4501,8 +4507,11 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT)
i9xx_hpd_irq_handler(dev);
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
if (hotplug_status)
i9xx_hpd_irq_handler(dev, hotplug_status);
}
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */

View File

@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* PCI config space */
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4 * 4096)
#define DEVEN 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
#define BSM 0x5c
#define BSM_MASK (0xFFFF << 20)
#define HPLLCC 0xc0 /* 85x only */
#define GC_CLOCK_CONTROL_MASK (0x7 << 0)
#define GC_CLOCK_133_200 (0 << 0)
@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GC_CLOCK_166_266 (6 << 0)
#define GC_CLOCK_166_250 (7 << 0)
#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0 << 2)
#define GRDOM_RENDER (1 << 2)
#define GRDOM_MEDIA (3 << 2)
#define GRDOM_MASK (3 << 2)
#define GRDOM_RESET_STATUS (1 << 1)
#define GRDOM_RESET_ENABLE (1 << 0)
#define GCDGMBUS 0xcc
#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
#define GCDGMBUS 0xcc
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
#define ASLE 0xe4
#define ASLS 0xfc
#define SWSCI 0xe8
#define SWSCI_SCISEL (1 << 15)
#define SWSCI_GSSCIE (1 << 0)
#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
/* Graphics reset regs */
#define I915_GDRST 0xc0 /* PCI config register */
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_MASK (3<<2)
#define GRDOM_RESET_STATUS (1<<1)
#define GRDOM_RESET_ENABLE (1<<0)
#define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
#define ILK_GRDOM_FULL (0<<1)
@ -1375,14 +1393,10 @@ enum skl_disp_power_wells {
#define _PORT_REF_DW6_A 0x162198
#define _PORT_REF_DW6_BC 0x6C198
/*
* FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
* after testing.
*/
#define GRC_CODE_SHIFT 23
#define GRC_CODE_MASK (0x1FF << GRC_CODE_SHIFT)
#define GRC_CODE_SHIFT 24
#define GRC_CODE_MASK (0xFF << GRC_CODE_SHIFT)
#define GRC_CODE_FAST_SHIFT 16
#define GRC_CODE_FAST_MASK (0x7F << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_FAST_MASK (0xFF << GRC_CODE_FAST_SHIFT)
#define GRC_CODE_SLOW_SHIFT 8
#define GRC_CODE_SLOW_MASK (0xFF << GRC_CODE_SLOW_SHIFT)
#define GRC_CODE_NOM_MASK 0xFF
@ -2934,7 +2948,14 @@ enum skl_disp_power_wells {
#define GEN6_RP_STATE_CAP _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define INTERVAL_1_28_US(us) (((us) * 100) >> 7)
/*
* Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
* 8300) freezing up around GPU hangs. Looks as if even
* scheduling/timer interrupts start misbehaving if the RPS
* EI/thresholds are "bad", leading to a very sluggish or even
* frozen machine.
*/
#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25)
#define INTERVAL_1_33_US(us) (((us) * 3) >> 2)
#define INTERVAL_0_833_US(us) (((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
@ -2943,6 +2964,15 @@ enum skl_disp_power_wells {
INTERVAL_1_33_US(us)) : \
INTERVAL_1_28_US(us))
#define INTERVAL_1_28_TO_US(interval) (((interval) << 7) / 100)
#define INTERVAL_1_33_TO_US(interval) (((interval) << 2) / 3)
#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
(IS_BROXTON(dev_priv) ? \
INTERVAL_0_833_TO_US(interval) : \
INTERVAL_1_33_TO_US(interval)) : \
INTERVAL_1_28_TO_US(interval))
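/*
 * Editor's worked example (SNB-BDW path): INTERVAL_1_28_US(1000) =
 * roundup((1000 * 100) >> 7, 25) = roundup(781, 25) = 800 register
 * units, and back again INTERVAL_1_28_TO_US(800) = (800 << 7) / 100
 * = 1024us. The round trip is deliberately approximate, biased
 * upwards by the multiple-of-25 workaround above.
 */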
/*
* Logical Context regs
*/
@ -6866,6 +6896,8 @@ enum skl_disp_power_wells {
#define VLV_SPAREG2H _MMIO(0xA194)
#define GTFIFODBG _MMIO(0x120000)
#define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20)
#define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13)
#define GT_FIFO_SBDROPERR (1<<6)
#define GT_FIFO_BLOBDROPERR (1<<5)
#define GT_FIFO_SB_READ_ABORTERR (1<<4)
@ -6882,8 +6914,11 @@ enum skl_disp_power_wells {
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
#define HSW_EDRAM_PRESENT _MMIO(0x120010)
#define HSW_EDRAM_CAP _MMIO(0x120010)
#define EDRAM_ENABLED 0x1
#define EDRAM_NUM_BANKS(cap) (((cap) >> 1) & 0xf)
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
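/*
 * Editor's decode sketch with a hypothetical capability value:
 * for cap = (1 << 8) | (2 << 5) | (4 << 1) | 1 = 0x149,
 * EDRAM_ENABLED -> 1, EDRAM_NUM_BANKS(cap) -> 4,
 * EDRAM_WAYS_IDX(cap) -> 2, EDRAM_SETS_IDX(cap) -> 1;
 * the *_IDX fields are encodings rather than raw counts, hence
 * the IDX suffix.
 */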
#define GEN6_UCGCTL1 _MMIO(0x9400)
# define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16)
@ -7161,6 +7196,7 @@ enum skl_disp_power_wells {
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)
#define GEN9_ENABLE_YV12_BUGFIX (1<<4)
#define GEN9_ENABLE_GPGPU_PREEMPTION (1<<2)
/* Audio */
#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)

View File

@ -58,8 +58,6 @@
#define SLAVE_ADDR1 0x70
#define SLAVE_ADDR2 0x72
static int panel_type;
/* Get BDB block size given a pointer to Block ID. */
static u32 _get_blocksize(const u8 *block_base)
{
@ -205,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int panel_type;
int drrs_mode;
int ret;
lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
if (!lvds_options)
return;
dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
if (lvds_options->panel_type == 0xff)
return;
panel_type = lvds_options->panel_type;
ret = intel_opregion_get_panel_type(dev_priv->dev);
if (ret >= 0) {
WARN_ON(ret > 0xf);
panel_type = ret;
DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
}
dev_priv->vbt.panel_type = panel_type;
drrs_mode = (lvds_options->dps_panel_type_bits
>> (panel_type * 2)) & MODE_MASK;
@ -251,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
panel_type);
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
if (!panel_fixed_mode)
@ -266,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@ -284,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
{
const struct bdb_lfp_backlight_data *backlight_data;
const struct bdb_lfp_backlight_data_entry *entry;
int panel_type = dev_priv->vbt.panel_type;
backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
if (!backlight_data)
@ -546,6 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
if (!edp) {
@ -657,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_psr *psr;
const struct psr_table *psr_table;
int panel_type = dev_priv->vbt.panel_type;
psr = find_section(bdb, BDB_PSR);
if (!psr) {
@ -703,6 +719,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
const struct bdb_mipi_config *start;
const struct mipi_config *config;
const struct mipi_pps_data *pps;
int panel_type = dev_priv->vbt.panel_type;
/* parse MIPI blocks only if LFP type is MIPI */
if (!intel_bios_is_dsi_present(dev_priv, NULL))
@ -910,6 +927,7 @@ static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
int panel_type = dev_priv->vbt.panel_type;
const struct bdb_mipi_sequence *sequence;
const u8 *seq_data;
u32 seq_size;

View File

@ -50,6 +50,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 23)
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
#define CSR_MAX_FW_SIZE 0x2FFF
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
@ -281,6 +282,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
uint32_t i;
uint32_t *dmc_payload;
uint32_t required_min_version;
if (!fw)
return NULL;
@ -296,15 +298,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
csr->version = css_header->version;
if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
csr->version < SKL_CSR_VERSION_REQUIRED) {
DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
required_min_version = SKL_CSR_VERSION_REQUIRED;
} else if (IS_BROXTON(dev_priv)) {
required_min_version = BXT_CSR_VERSION_REQUIRED;
} else {
MISSING_CASE(INTEL_REVID(dev_priv));
required_min_version = 0;
}
if (csr->version < required_min_version) {
DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
" please upgrade to v%u.%u or later"
" [" FIRMWARE_URL "].\n",
CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version),
CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
CSR_VERSION_MAJOR(required_min_version),
CSR_VERSION_MINOR(required_min_version));
return NULL;
}
@ -456,11 +466,51 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
schedule_work(&dev_priv->csr.work);
}
/**
* intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
* @dev_priv: i915 drm device
*
* Prepare the DMC firmware before entering system suspend. This includes
* flushing pending work items and releasing any resources acquired during
* init.
*/
void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
{
if (!HAS_CSR(dev_priv))
return;
flush_work(&dev_priv->csr.work);
/* Drop the reference held in case DMC isn't loaded. */
if (!dev_priv->csr.dmc_payload)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
}
/**
* intel_csr_ucode_resume() - init CSR firmware during system resume
* @dev_priv: i915 drm device
*
* Reinitialize the DMC firmware during system resume, reacquiring any
* resources released in intel_csr_ucode_suspend().
*/
void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
{
if (!HAS_CSR(dev_priv))
return;
/*
* Reacquire the reference to keep RPM disabled in case DMC isn't
* loaded.
*/
if (!dev_priv->csr.dmc_payload)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
/**
* intel_csr_ucode_fini() - unload the CSR firmware.
* @dev_priv: i915 drm device.
*
* Firmmware unloading includes freeing the internal momory and reset the
* Firmware unloading includes freeing the internal memory and resetting the
* firmware loading status.
*/
void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@ -468,7 +518,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
flush_work(&dev_priv->csr.work);
intel_csr_ucode_suspend(dev_priv);
kfree(dev_priv->csr.dmc_payload);
}

View File

@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
} else if (IS_BROADWELL(dev_priv)) {
ddi_translations_fdi = bdw_ddi_translations_fdi;
ddi_translations_dp = bdw_ddi_translations_dp;
ddi_translations_edp = bdw_ddi_translations_edp;
if (dev_priv->vbt.edp.low_vswing) {
ddi_translations_edp = bdw_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
} else {
ddi_translations_edp = bdw_ddi_translations_dp;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
}
ddi_translations_hdmi = bdw_ddi_translations_hdmi;
n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
hdmi_default_entry = 7;
@ -1722,12 +1730,78 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
}
}
static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
return false;
if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
phy);
return false;
}
if (phy == DPIO_PHY1 &&
!(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
return false;
}
if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
phy);
return false;
}
return true;
}
static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
}
static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
static void broxton_phy_init(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
u32 ports, val;
if (broxton_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy == DPIO_PHY0)
dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
if (broxton_phy_verify_state(dev_priv, phy)) {
DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
"won't reprogram it\n", phy);
return;
}
DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
"force reprogramming it\n", phy);
} else {
DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
}
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val |= GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
@ -1798,6 +1872,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* enabled.
* TODO: port C is only connected on BXT-P, so on BXT0/1 we should
* power down the second channel on PHY0 as well.
*
* FIXME: Clarify programming of the following, the register is
* read-only with bit 6 fixed at 0 at least in stepping A.
*/
if (phy == DPIO_PHY1)
val |= OCL2_LDOFUSE_PWR_DIS;
@ -1810,12 +1887,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
10))
DRM_ERROR("timeout waiting for PHY1 GRC\n");
broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
DPIO_PHY1);
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@ -1825,17 +1900,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
val |= GRC_DIS | GRC_RDY_OVRD;
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
}
/*
* During PHY1 init, defer waiting for GRC calibration to finish, since
* it can happen in parallel with the subsequent PHY0 init.
*/
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
}
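
To make the GRC packing above concrete, a worked example, assuming the usual shift definitions from i915_reg.h (16 for the fast field, 8 for the slow field; the value itself is illustrative):

/*
 * Illustrative only: with a calibrated GRC value of 0x98 read from PHY1,
 *
 *   grc_code = 0x98 << GRC_CODE_FAST_SHIFT |
 *              0x98 << GRC_CODE_SLOW_SHIFT |
 *              0x98
 *            = 0x00989898
 *
 * i.e. the same code replicated into the fast, slow and nominal fields
 * of BXT_PORT_REF_DW6 on PHY0.
 */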
void broxton_ddi_phy_init(struct drm_device *dev)
void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
{
/* Enable PHY1 first since it provides Rcomp for PHY0 */
broxton_phy_init(dev->dev_private, DPIO_PHY1);
broxton_phy_init(dev->dev_private, DPIO_PHY0);
broxton_phy_init(dev_priv, DPIO_PHY1);
broxton_phy_init(dev_priv, DPIO_PHY0);
/*
* If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
* PHY1 GRC calibration to finish, so wait for it here.
*/
broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@ -1846,17 +1931,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
val &= ~GT_DISPLAY_POWER_ON(phy);
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}
void broxton_ddi_phy_uninit(struct drm_device *dev)
void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
broxton_phy_uninit(dev_priv, DPIO_PHY1);
broxton_phy_uninit(dev_priv, DPIO_PHY0);
}
/* FIXME: do this in broxton_phy_uninit per phy */
I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
static bool __printf(6, 7)
__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
i915_reg_t reg, u32 mask, u32 expected,
const char *reg_fmt, ...)
{
struct va_format vaf;
va_list args;
u32 val;
val = I915_READ(reg);
if ((val & mask) == expected)
return true;
va_start(args, reg_fmt);
vaf.fmt = reg_fmt;
vaf.va = &args;
DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
va_end(args);
return false;
}
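
The helper above uses the kernel's %pV extension with struct va_format to splice a printf-style register name into a single debug line; a minimal standalone sketch of the same idiom:

/* Minimal illustration of the %pV + struct va_format idiom. */
static void __printf(1, 2) example_dbg(const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_DEBUG "example: %pV\n", &vaf);
	va_end(args);
}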
static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
enum port port;
u32 ports;
uint32_t mask;
bool ok;
#define _CHK(reg, mask, exp, fmt, ...) \
__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
## __VA_ARGS__)
/* We expect the PHY to be always enabled */
if (!broxton_phy_is_enabled(dev_priv, phy))
return false;
ok = true;
if (phy == DPIO_PHY0)
ports = BIT(PORT_B) | BIT(PORT_C);
else
ports = BIT(PORT_A);
for_each_port_masked(port, ports) {
int lane;
for (lane = 0; lane < 4; lane++)
ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
LATENCY_OPTIM,
lane != 1 ? LATENCY_OPTIM : 0,
"BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
}
/* PLL Rcomp code offset */
ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW9(%d)", phy);
ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
"BXT_PORT_CL1CM_DW10(%d)", phy);
/* Power gating */
mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
"BXT_PORT_CL1CM_DW28(%d)", phy);
if (phy == DPIO_PHY0)
ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
"BXT_PORT_CL2CM_DW6_BC");
/*
* TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
* at least on stepping A this bit is read-only and fixed at 0.
*/
if (phy == DPIO_PHY0) {
u32 grc_code = dev_priv->bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
grc_code;
mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
GRC_CODE_NOM_MASK;
ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
"BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
mask = GRC_DIS | GRC_RDY_OVRD;
ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
"BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
}
return ok;
#undef _CHK
}
void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
{
if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
!broxton_phy_verify_state(dev_priv, DPIO_PHY1))
i915_report_error(dev_priv, "DDI PHY state mismatch\n");
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@ -2044,12 +2238,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
intel_ddi_clock_get(encoder, pipe_config);
}
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
/* HDMI has nothing special to destroy, so we can go with this. */
intel_dp_encoder_destroy(encoder);
}
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@ -2068,7 +2256,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}
static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
.reset = intel_dp_encoder_reset,
.destroy = intel_dp_encoder_destroy,
};
static struct intel_connector *
@ -2167,6 +2356,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &

View File

@ -1530,45 +1530,47 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
static void _vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
POSTING_READ(DPLL(pipe));
udelay(150);
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
static void vlv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg = DPLL(pipe);
u32 dpll = pipe_config->dpll_hw_state.dpll;
assert_pipe_disabled(dev_priv, pipe);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150);
if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
_vlv_enable_pll(crtc, pipe_config);
I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
POSTING_READ(DPLL_MD(pipe));
}
static void chv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
static void _chv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 tmp;
assert_pipe_disabled(dev_priv, pipe);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
mutex_lock(&dev_priv->sb_lock);
/* Enable back the 10bit clock to display controller */
@ -1589,6 +1591,21 @@ static void chv_enable_pll(struct intel_crtc *crtc,
/* Check PLL is locked */
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
}
static void chv_enable_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
assert_pipe_disabled(dev_priv, pipe);
/* PLL is protected by panel, make sure we can write it */
assert_panel_unlocked(dev_priv, pipe);
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
_chv_enable_pll(crtc, pipe_config);
if (pipe != PIPE_A) {
/*
@ -3198,12 +3215,12 @@ void intel_finish_reset(struct drm_device *dev)
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned reset_counter;
bool pending;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
if (intel_crtc->reset_counter != reset_counter)
return false;
spin_lock_irq(&dev->event_lock);
@ -3805,9 +3822,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
intel_crtc->unpin_work = NULL;
if (work->event)
drm_send_vblank_event(intel_crtc->base.dev,
intel_crtc->pipe,
work->event);
drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
drm_crtc_vblank_put(&intel_crtc->base);
@ -4088,12 +4103,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/*
* Sometimes spurious CPU pipe underruns happen during FDI
* training, at least with VGA+HDMI cloning. Suppress them.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
@ -4128,8 +4137,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
intel_fdi_normal_train(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
const struct drm_display_mode *adjusted_mode =
@ -4732,6 +4739,18 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (WARN_ON(intel_crtc->active))
return;
/*
* Sometimes spurious CPU pipe underruns happen during FDI
* training, at least with VGA+HDMI cloning. Suppress them.
*
* On ILK we get occasional spurious CPU pipe underruns
* between eDP port A enable and vdd enable. Also PCH port
* enable seems to result in the occasional CPU pipe underrun.
*
* Spurious PCH underruns also occur during PCH enabling.
*/
if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (intel_crtc->config->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
@ -4753,8 +4772,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
@ -4796,6 +4813,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
/* Must wait for vblank to avoid spurious PCH FIFO underruns */
if (intel_crtc->config->has_pch_encoder)
intel_wait_for_vblank(dev, pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
@ -4948,8 +4966,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
if (intel_crtc->config->has_pch_encoder)
/*
* Sometimes spurious CPU pipe underruns happen when the
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
if (intel_crtc->config->has_pch_encoder) {
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@ -4957,22 +4982,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
drm_crtc_vblank_off(crtc);
assert_vblank_disabled(crtc);
/*
* Sometimes spurious CPU pipe underruns happen when the
* pipe is already disabled, but FDI RX/TX is still enabled.
* Happens at least with VGA+HDMI cloning. Suppress them.
*/
if (intel_crtc->config->has_pch_encoder)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_disable_pipe(intel_crtc);
ironlake_pfit_disable(intel_crtc, false);
if (intel_crtc->config->has_pch_encoder) {
if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
@ -5002,6 +5017,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
ironlake_fdi_pll_disable(intel_crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
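
The underrun changes in this file all follow the same shape: suppress reporting around the step known to trigger spurious underruns, let the hardware settle (typically a vblank), then re-arm reporting. Schematically (the middle step is a hypothetical placeholder):

intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

step_that_generates_spurious_underruns();	/* hypothetical placeholder */

intel_wait_for_vblank(dev, pipe);		/* let things settle */
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);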
@ -5329,9 +5345,8 @@ static void intel_update_cdclk(struct drm_device *dev)
intel_update_max_cdclk(dev);
}
static void broxton_set_cdclk(struct drm_device *dev, int frequency)
static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t divider;
uint32_t ratio;
uint32_t current_freq;
@ -5445,33 +5460,46 @@ static void broxton_set_cdclk(struct drm_device *dev, int frequency)
return;
}
intel_update_cdclk(dev);
intel_update_cdclk(dev_priv->dev);
}
void broxton_init_cdclk(struct drm_device *dev)
static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t val;
if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
return false;
/*
* NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
* or else the reset will hang because there is no PCH to respond.
* Move the handshake programming to initialization sequence.
* Previously was left up to BIOS.
*/
val = I915_READ(HSW_NDE_RSTWRN_OPT);
val &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
/* TODO: Check for a valid CDCLK rate */
/* Enable PG1 for cdclk */
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
return false;
}
if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
return false;
}
return true;
}
bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
{
return broxton_cdclk_is_enabled(dev_priv);
}
void broxton_init_cdclk(struct drm_i915_private *dev_priv)
{
/* check if cd clock is enabled */
if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
DRM_DEBUG_KMS("Display already initialized\n");
if (broxton_cdclk_is_enabled(dev_priv)) {
DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
return;
}
DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
@ -5479,7 +5507,7 @@ void broxton_init_cdclk(struct drm_device *dev)
* - check if setting the max (or any) cdclk freq is really necessary
* here, it belongs to modeset time
*/
broxton_set_cdclk(dev, 624000);
broxton_set_cdclk(dev_priv, 624000);
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@ -5490,10 +5518,8 @@ void broxton_init_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power enable timeout!\n");
}
void broxton_uninit_cdclk(struct drm_device *dev)
void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
POSTING_READ(DBUF_CTL);
@ -5503,9 +5529,7 @@ void broxton_uninit_cdclk(struct drm_device *dev)
DRM_ERROR("DBuf power disable timeout!\n");
/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
broxton_set_cdclk(dev, 19200);
intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
broxton_set_cdclk(dev_priv, 19200);
}
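
Taken together, the cdclk and PHY helpers above are intended to be driven from the BXT display core init/uninit sequence; a rough sketch of the expected ordering (call sites illustrative, the real ones live in the power domain code):

/* Illustrative ordering only. */
static void example_bxt_display_core_init(struct drm_i915_private *dev_priv,
					  bool resume)
{
	broxton_init_cdclk(dev_priv);
	broxton_ddi_phy_init(dev_priv);

	if (resume) {
		/* Cross-check what BIOS/DMC left behind. */
		broxton_cdclk_verify_state(dev_priv);
		broxton_ddi_phy_verify_state(dev_priv);
	}
}

static void example_bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	broxton_ddi_phy_uninit(dev_priv);
	broxton_uninit_cdclk(dev_priv);
}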
static const struct skl_cdclk_entry {
@ -6072,14 +6096,12 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (!intel_crtc->config->has_dsi_encoder) {
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
vlv_prepare_pll(intel_crtc, intel_crtc->config);
vlv_enable_pll(intel_crtc, intel_crtc->config);
}
if (IS_CHERRYVIEW(dev)) {
chv_prepare_pll(intel_crtc, intel_crtc->config);
chv_enable_pll(intel_crtc, intel_crtc->config);
} else {
vlv_prepare_pll(intel_crtc, intel_crtc->config);
vlv_enable_pll(intel_crtc, intel_crtc->config);
}
for_each_encoder_on_crtc(dev, crtc, encoder)
@ -6117,7 +6139,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
int pipe = intel_crtc->pipe;
enum pipe pipe = intel_crtc->pipe;
if (WARN_ON(intel_crtc->active))
return;
@ -7173,11 +7195,15 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV;
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!pipe_config->has_dsi_encoder)
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
DPLL_EXT_BUFFER_ENABLE_VLV;
pipe_config->dpll_hw_state.dpll_md =
(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
@ -7186,11 +7212,14 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
DPLL_VCO_ENABLE;
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
if (!pipe_config->has_dsi_encoder)
pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
pipe_config->dpll_hw_state.dpll_md =
(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
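
The effect of the has_dsi_encoder special case above, spelled out (bit names as in the surrounding code):

/*
 * Illustrative only, for VLV pipe B:
 *
 *   non-DSI: DPLL_INTEGRATED_REF_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV |
 *            DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CRI_CLK_VLV |
 *            DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV
 *
 *   DSI:     the same minus DPLL_VCO_ENABLE/DPLL_EXT_BUFFER_ENABLE_VLV,
 *            which lets vlv_prepare_pll()/vlv_enable_pll() (and the CHV
 *            equivalents) enable just the refclk and bail out early.
 */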
@ -7200,11 +7229,20 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
/* Enable Refclk */
I915_WRITE(DPLL(pipe),
pipe_config->dpll_hw_state.dpll &
~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
mutex_lock(&dev_priv->sb_lock);
bestn = pipe_config->dpll.n;
@ -7291,14 +7329,21 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = crtc->pipe;
i915_reg_t dpll_reg = DPLL(crtc->pipe);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
int vco;
/* Enable Refclk and SSC */
I915_WRITE(DPLL(pipe),
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
bestn = pipe_config->dpll.n;
bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
bestm1 = pipe_config->dpll.m1;
@ -7309,12 +7354,6 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
dpio_val = 0;
loopfilter = 0;
/*
* Enable Refclk and SSC
*/
I915_WRITE(dpll_reg,
pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
mutex_lock(&dev_priv->sb_lock);
/* p1 and p2 divider */
@ -7929,9 +7968,6 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (crtc_state->has_dsi_encoder)
return 0;
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
@ -7953,9 +7989,6 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (crtc_state->has_dsi_encoder)
return 0;
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
@ -8008,8 +8041,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
u32 mdiv;
int refclk = 100000;
/* In case of MIPI DPLL will not even be used */
if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
/* In case of DSI, DPLL will not be used */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
mutex_lock(&dev_priv->sb_lock);
@ -8105,6 +8138,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
int refclk = 100000;
/* In case of DSI, DPLL will not be used */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
return;
mutex_lock(&dev_priv->sb_lock);
cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
@ -9533,7 +9570,7 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
to_intel_atomic_state(old_state);
unsigned int req_cdclk = old_intel_state->dev_cdclk;
broxton_set_cdclk(dev, req_cdclk);
broxton_set_cdclk(to_i915(dev), req_cdclk);
}
/* compute the max rate for new configuration */
@ -10903,9 +10940,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned reset_counter;
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (crtc->reset_counter != reset_counter)
return true;
/*
@ -11359,7 +11397,6 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
if (mmio_flip->req) {
WARN_ON(__i915_wait_request(mmio_flip->req,
mmio_flip->crtc->reset_counter,
false, NULL,
&mmio_flip->i915->rps.mmioflips));
i915_gem_request_unreference__unlocked(mmio_flip->req);
@ -11567,8 +11604,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup;
intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
ret = -EIO;
goto cleanup;
}
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
@ -11654,7 +11696,7 @@ cleanup_unpin:
intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
if (!IS_ERR_OR_NULL(request))
i915_gem_request_cancel(request);
i915_add_request_no_flush(request);
atomic_dec(&intel_crtc->unpin_work_count);
mutex_unlock(&dev->struct_mutex);
cleanup:
@ -11704,7 +11746,7 @@ retry:
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
drm_send_vblank_event(dev, pipe, event);
drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&dev->event_lock);
}
}
@ -12686,7 +12728,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
if (!adjust) {
@ -12721,6 +12763,9 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
PIPE_CONF_CHECK_X(dsi_pll.div);
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
@ -13401,6 +13446,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (state->legacy_cursor_update)
continue;
ret = intel_crtc_wait_for_pending_flips(crtc);
if (ret)
return ret;
@ -13414,12 +13462,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
return ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
u32 reset_counter;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->struct_mutex);
if (!ret && !async) {
for_each_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
@ -13428,25 +13473,18 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
continue;
ret = __i915_wait_request(intel_plane_state->wait_req,
reset_counter, true,
NULL, NULL);
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret == -EIO)
ret = 0;
if (ret)
true, NULL, NULL);
if (ret) {
/* Any hang should be swallowed by the wait */
WARN_ON(ret == -EIO);
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
mutex_unlock(&dev->struct_mutex);
break;
}
}
if (!ret)
return 0;
mutex_lock(&dev->struct_mutex);
drm_atomic_helper_cleanup_planes(dev, state);
}
mutex_unlock(&dev->struct_mutex);
return ret;
}
@ -13488,7 +13526,7 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(50));
WARN_ON(!lret);
WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
drm_crtc_vblank_put(crtc);
}
@ -13790,10 +13828,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
*/
if (needs_modeset(crtc_state))
ret = i915_gem_object_wait_rendering(old_obj, true);
/* Swallow -EIO errors to allow updates during hw lockup. */
if (ret && ret != -EIO)
if (ret) {
/* GPU hangs should have been swallowed by the wait */
WARN_ON(ret == -EIO);
return ret;
}
}
/* For framebuffer backed by dmabuf, wait for fence */

View File

@ -2215,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
POSTING_READ(DP_A);
udelay(500);
/*
* [DevILK] Work around required when enabling DP PLL
* while a pipe is enabled going to FDI:
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
if (IS_GEN5(dev_priv))
intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
I915_WRITE(DP_A, intel_dp->DP);
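
For the workaround above it helps to remember that ILK has only pipes A and B, so !crtc->pipe selects the other pipe. The assumed semantics of the helper (a sketch, not the exact implementation):

static inline void example_wait_for_vblank_if_active(struct drm_device *dev,
						     int pipe)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));

	/* Only wait when a crtc is actually running on that pipe. */
	if (crtc->active)
		intel_wait_for_vblank(dev, pipe);
}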
@ -2630,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = crtc->pipe;
if (WARN_ON(dp_reg & DP_PORT_EN))
@ -2641,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_init_panel_power_sequencer(intel_dp);
/*
* We get an occasional spurious underrun between the port
* enable and vdd enable, when enabling port A eDP.
*
* FIXME: Not sure if this applies to (PCH) port D eDP as well
*/
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_dp_enable_port(intel_dp);
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* Underrun reporting for the other pipe was disabled in
* g4x_pre_enable_dp(). The eDP PLL and port have now been
* enabled, so it's now safe to re-enable underrun reporting.
*/
intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
}
edp_panel_vdd_on(intel_dp);
edp_panel_on(intel_dp);
edp_panel_vdd_off(intel_dp, true);
if (port == PORT_A)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@ -2711,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder)
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
intel_dp_prepare(encoder);
if (port == PORT_A && IS_GEN5(dev_priv)) {
/*
* We get FIFO underruns on the other pipe when
* enabling the CPU eDP PLL, and when enabling CPU
* eDP port. We could potentially avoid the PLL
* underrun with a vblank wait just prior to enabling
* the PLL, but that doesn't appear to help the port
* enable case. Just sweep it all under the rug.
*/
intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
}
/* Only ilk+ has port A */
if (port == PORT_A)
ironlake_edp_pll_on(intel_dp);
@ -3806,7 +3776,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
if (!intel_dp->sink_count)
if (!is_edp(intel_dp) && !intel_dp->sink_count)
return false;
/* Check if the panel supports PSR */
@ -4339,6 +4309,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
if (is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
@ -4608,6 +4581,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->compliance_test_type = 0;
intel_dp->compliance_test_data = 0;
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst,
intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
goto out;
}
@ -4665,20 +4647,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
}
out:
if (status != connector_status_connected) {
if ((status != connector_status_connected) &&
(intel_dp->is_mst == false))
intel_dp_unset_edid(intel_dp);
/*
* If we were in MST mode, and device is not there,
* get out of MST mode
*/
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
}
intel_display_power_put(to_i915(dev), power_domain);
return;
@ -4851,6 +4822,11 @@ intel_dp_set_property(struct drm_connector *connector,
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev_priv) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
@ -4914,7 +4890,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@ -4956,7 +4932,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
edp_panel_vdd_schedule_off(intel_dp);
}
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp;

View File

@ -1295,17 +1295,9 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
uint32_t temp;
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
/*
* Definition of each bit polarity has been changed
* after A1 stepping
*/
if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
temp &= ~PORT_PLL_REF_SEL;
else
temp |= PORT_PLL_REF_SEL;
/* Non-SSC reference */
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
/* Disable 10 bit clock */
@ -1652,10 +1644,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
DRM_ERROR("LCPLL1 is disabled\n");
} else if (IS_BROXTON(dev)) {
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);
} else {
} else if (!IS_BROXTON(dev_priv)) {
/*
* The LCPLL register should be turned on by the BIOS. For now
* let's just check its state and print errors in case

View File

@ -497,6 +497,11 @@ struct intel_crtc_state {
/* Actual register state of the dpll, for shared dpll cross-checking. */
struct intel_dpll_hw_state dpll_hw_state;
/* DSI PLL registers */
struct {
u32 ctrl, div;
} dsi_pll;
int pipe_bpp;
struct intel_link_m_n dp_m_n;
@ -1224,12 +1229,16 @@ void intel_prepare_reset(struct drm_device *dev);
void intel_finish_reset(struct drm_device *dev);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
void broxton_init_cdclk(struct drm_device *dev);
void broxton_uninit_cdclk(struct drm_device *dev);
void broxton_ddi_phy_init(struct drm_device *dev);
void broxton_ddi_phy_uninit(struct drm_device *dev);
void broxton_init_cdclk(struct drm_i915_private *dev_priv);
void broxton_uninit_cdclk(struct drm_i915_private *dev_priv);
bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_init(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv);
void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
@ -1268,6 +1277,8 @@ u32 skl_plane_ctl_rotation(unsigned int rotation);
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
void intel_csr_ucode_fini(struct drm_i915_private *);
void intel_csr_ucode_suspend(struct drm_i915_private *);
void intel_csr_ucode_resume(struct drm_i915_private *);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
@ -1278,6 +1289,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
@ -1462,8 +1475,8 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv);
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);

View File

@ -290,16 +290,26 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int ret;
DRM_DEBUG_KMS("\n");
pipe_config->has_dsi_encoder = true;
if (fixed_mode)
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
if (HAS_GMCH_DISPLAY(dev_priv))
intel_gmch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
else
intel_pch_panel_fitting(crtc, pipe_config,
intel_connector->panel.fitting_mode);
}
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
@ -311,6 +321,12 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
pipe_config->cpu_transcoder = TRANSCODER_DSI_A;
}
ret = intel_compute_dsi_pll(encoder, pipe_config);
if (ret)
return false;
pipe_config->clock_set = true;
return true;
}
@ -498,14 +514,19 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum pipe pipe = intel_crtc->pipe;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
enum port port;
u32 tmp;
DRM_DEBUG_KMS("\n");
intel_enable_dsi_pll(encoder);
/*
* The BIOS may leave the PLL in a wonky state where it doesn't
* lock. It needs to be fully powered down to fix it.
*/
intel_disable_dsi_pll(encoder);
intel_enable_dsi_pll(encoder, crtc->config);
intel_dsi_prepare(encoder);
/* Panel Enable over CRC PMIC */
@ -515,19 +536,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
msleep(intel_dsi->panel_on_delay);
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
*/
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
/* update the hw state for DPLL */
intel_crtc->config->dpll_hw_state.dpll =
DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
/* Disable DPOunit clock gating, can stall pipe */
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
@ -679,11 +688,16 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
drm_panel_unprepare(intel_dsi->panel);
msleep(intel_dsi->panel_off_delay);
msleep(intel_dsi->panel_pwr_cycle_delay);
/* Panel Disable over CRC PMIC */
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
/*
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
msleep(intel_dsi->panel_pwr_cycle_delay);
}
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@ -716,11 +730,12 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
/* Due to some hardware limitations on BYT, MIPI Port C DPI
* Enable bit does not get set. To check whether DSI Port C
* was enabled in BIOS, check the Pipe B enable bit
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
* bit in port C control register does not get set. As a
* workaround, check pipe B conf instead.
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C)
if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && port == PORT_C)
enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
@ -826,13 +841,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (IS_BROXTON(dev))
bxt_dsi_get_pipe_config(encoder, pipe_config);
/*
* DPLL_MD is not used in case of DSI, reading will get some default value
* set dpll_md = 0
*/
pipe_config->dpll_hw_state.dpll_md = 0;
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
pipe_config);
if (!pclk)
return;
@ -845,7 +855,7 @@ intel_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
DRM_DEBUG_KMS("\n");
@ -1183,6 +1193,48 @@ static int intel_dsi_get_modes(struct drm_connector *connector)
return 1;
}
static int intel_dsi_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_crtc *crtc;
int ret;
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
if (property == dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
if (HAS_GMCH_DISPLAY(dev) &&
val == DRM_MODE_SCALE_CENTER) {
DRM_DEBUG_KMS("centering not supported\n");
return -EINVAL;
}
if (intel_connector->panel.fitting_mode == val)
return 0;
intel_connector->panel.fitting_mode = val;
}
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->state->enable) {
/*
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
intel_crtc_restore_mode(crtc);
}
return 0;
}
static void intel_dsi_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@ -1225,11 +1277,25 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.detect = intel_dsi_detect,
.destroy = intel_dsi_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dsi_set_property,
.atomic_get_property = intel_connector_atomic_get_property,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
if (connector->panel.fixed_mode) {
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
}
}
void intel_dsi_init(struct drm_device *dev)
{
struct intel_dsi *intel_dsi;
@ -1353,8 +1419,6 @@ void intel_dsi_init(struct drm_device *dev)
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_connector_register(connector);
drm_panel_attach(intel_dsi->panel, connector);
mutex_lock(&dev->mode_config.mutex);
@ -1373,6 +1437,11 @@ void intel_dsi_init(struct drm_device *dev)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_dsi_add_properties(intel_connector);
drm_connector_register(connector);
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;

View File

@ -127,11 +127,15 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
}
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void intel_disable_dsi_pll(struct intel_encoder *encoder);
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config);
void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);

View File

@ -30,12 +30,7 @@
#include "i915_drv.h"
#include "intel_dsi.h"
struct dsi_mnp {
u32 dsi_pll_ctrl;
u32 dsi_pll_div;
};
static const u32 lfsr_converts[] = {
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */
@ -57,7 +52,8 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt,
}
static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
struct dsi_mnp *dsi_mnp, int target_dsi_clk)
struct intel_crtc_state *config,
int target_dsi_clk)
{
unsigned int calc_m = 0, calc_p = 0;
unsigned int m_min, m_max, p_min = 2, p_max = 6;
@ -103,8 +99,8 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* register has log2(N1), this works fine for powers of two */
n = ffs(n) - 1;
m_seed = lfsr_converts[calc_m - 62];
dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
dsi_mnp->dsi_pll_div = n << DSI_PLL_N1_DIV_SHIFT |
config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT |
m_seed << DSI_PLL_M1_DIV_SHIFT;
return 0;
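
To make the M/N/P search concrete, a worked example under the data-rate formula used by dsi_clk_from_pclk() (numbers illustrative):

/*
 * Illustrative only: RGB888 panel, pclk = 100000 kHz, 4 lanes.
 *
 *   dsi_clk = pclk * bpp / lane_count = 100000 * 24 / 4 = 600000 kHz
 *
 * dsi_calc_mnp() then searches m and p (with n restricted to powers of
 * two) so that ref_clk * m / (n * p) best approximates the target, and
 * stores the m seed via the lfsr_converts[] table above.
 */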
@ -114,54 +110,55 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
* XXX: The muxing and gating is hard coded for now. Need to add support for
* sharing PLLs with two DSI outputs.
*/
static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
int ret;
struct dsi_mnp dsi_mnp;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
ret = dsi_calc_mnp(dev_priv, &dsi_mnp, dsi_clk);
ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
return;
return ret;
}
if (intel_dsi->ports & (1 << PORT_A))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
if (intel_dsi->ports & (1 << PORT_C))
dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
config->dsi_pll.ctrl |= DSI_PLL_CLK_GATE_DSI1_DSIPLL;
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
config->dsi_pll.div, config->dsi_pll.ctrl);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
return 0;
}
static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
static void vlv_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
DRM_DEBUG_KMS("\n");
mutex_lock(&dev_priv->sb_lock);
vlv_configure_dsi_pll(encoder);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, config->dsi_pll.div);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL,
config->dsi_pll.ctrl & ~DSI_PLL_VCO_EN);
/* wait at least 0.5 us after ungating before enabling VCO */
usleep_range(1, 10);
tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
tmp |= DSI_PLL_VCO_EN;
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
if (wait_for(vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL) &
DSI_PLL_LOCK, 20)) {
@ -177,7 +174,7 @@ static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
DRM_DEBUG_KMS("\n");
@ -224,7 +221,7 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
DRM_DEBUG_KMS("\n");
@ -251,14 +248,15 @@ static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
bpp, pipe_bpp);
}
static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
int refclk = 25000;
int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
DRM_DEBUG_KMS("\n");
@ -268,6 +266,9 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
pll_div = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_DIVIDER);
mutex_unlock(&dev_priv->sb_lock);
config->dsi_pll.ctrl = pll_ctl & ~DSI_PLL_LOCK;
config->dsi_pll.div = pll_div;
/* mask out other bits and extract the P1 divisor */
pll_ctl &= DSI_PLL_P1_POST_DIV_MASK;
pll_ctl = pll_ctl >> (DSI_PLL_P1_POST_DIV_SHIFT - 2);
@ -313,7 +314,8 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
u32 pclk;
u32 dsi_clk;
@ -327,15 +329,9 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return 0;
}
dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
BXT_DSI_PLL_RATIO_MASK;
config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
/* Invalid DSI ratio ? */
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
return 0;
}
dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
@ -348,12 +344,13 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk;
}
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
struct intel_crtc_state *config)
{
if (IS_BROXTON(encoder->base.dev))
return bxt_dsi_get_pclk(encoder, pipe_bpp);
return bxt_dsi_get_pclk(encoder, pipe_bpp, config);
else
return vlv_dsi_get_pclk(encoder, pipe_bpp);
return vlv_dsi_get_pclk(encoder, pipe_bpp, config);
}
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@ -370,7 +367,8 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
}
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
@ -390,8 +388,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
/* Get the current DSI rate(actual) */
pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
BXT_DSI_PLL_RATIO_MASK;
pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
/*
@ -427,16 +424,15 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u32 dsi_clk;
u32 val;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
intel_dsi->lane_count);
/*
* From clock diagram, to get PLL ratio divider, divide double of DSI
@ -445,9 +441,9 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
return false;
return -ECHRNG;
}
/*
@ -455,27 +451,19 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
val = I915_READ(BXT_DSI_PLL_CTL);
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val &= ~BXT_DSI_FREQ_SEL_MASK;
val &= ~BXT_DSI_PLL_RATIO_MASK;
val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
if (dsi_ratio <= 50) {
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val |= BXT_DSI_PLL_PVD_RATIO_1;
}
if (dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
I915_WRITE(BXT_DSI_PLL_CTL, val);
POSTING_READ(BXT_DSI_PLL_CTL);
return true;
return 0;
}
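
And the corresponding BXT ratio arithmetic, assuming the usual 19200 kHz reference clock (numbers illustrative):

/*
 * Illustrative only: dsi_clk = 600000 kHz gives
 *
 *   dsi_ratio = DIV_ROUND_UP(600000 * 2, 19200) = 63
 *
 * which must fall within [BXT_DSI_PLL_RATIO_MIN, BXT_DSI_PLL_RATIO_MAX];
 * being above 50, it also leaves the PVD ratio alone rather than
 * setting BXT_DSI_PLL_PVD_RATIO_1.
 */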
static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -484,23 +472,13 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n");
val = I915_READ(BXT_DSI_PLL_ENABLE);
if (val & BXT_DSI_PLL_DO_ENABLE) {
WARN(1, "DSI PLL already enabled. Disabling it.\n");
val &= ~BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
}
/* Configure PLL vales */
if (!bxt_configure_dsi_pll(encoder)) {
DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
return;
}
I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
POSTING_READ(BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
for_each_dsi_port(port, intel_dsi->ports)
bxt_dsi_program_clocks(encoder->base.dev, port);
bxt_dsi_program_clocks(encoder->base.dev, port, config);
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
@ -526,14 +504,28 @@ bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return false;
}
void intel_enable_dsi_pll(struct intel_encoder *encoder)
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_enable_dsi_pll(encoder);
return vlv_compute_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder);
return bxt_compute_dsi_pll(encoder, config);
return -ENODEV;
}
void intel_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
vlv_enable_dsi_pll(encoder, config);
else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder, config);
}
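The new compute/enable split follows the usual atomic-modeset pattern: failures surface during the check phase, and the commit phase only applies precomputed state. A minimal sketch of the intended call order (the caller below is hypothetical):

/* Sketch only; locking and surrounding modeset plumbing elided. */
static int example_dsi_pll_modeset(struct intel_encoder *encoder,
				   struct intel_crtc_state *config)
{
	int ret;

	/* Check phase: may fail (e.g. -ECHRNG for an unachievable BXT
	 * ratio, -ENODEV on unknown platforms); writes only to
	 * config->dsi_pll, never to hardware. */
	ret = intel_compute_dsi_pll(encoder, config);
	if (ret)
		return ret;

	/* Commit phase: applies the precomputed PLL control value to
	 * the hardware and is not allowed to fail. */
	intel_enable_dsi_pll(encoder, config);
	return 0;
}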
void intel_disable_dsi_pll(struct intel_encoder *encoder)


@ -27,8 +27,34 @@
#include "intel_guc_fwif.h"
#include "i915_guc_reg.h"
struct drm_i915_gem_request;
/*
* This structure primarily describes the GEM object shared with the GuC.
* The GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
* long as the GuC is in use. We also keep the first page (only) mapped
* into kernel address space, as it includes shared data that must be
* updated on every request submission.
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
* kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
* queue (a circular array of work items), again described in the process
* descriptor. Work queue pages are mapped momentarily as required.
*
* Finally, we also keep a few statistics here, including the number of
* submissions to each engine, and a record of the last submission failure
* (if any).
*/
struct i915_guc_client {
struct drm_i915_gem_object *client_obj;
void *client_base; /* first page (only) of above */
struct intel_context *owner;
struct intel_guc *guc;
uint32_t priority;
@ -43,13 +69,14 @@ struct i915_guc_client {
uint32_t wq_offset;
uint32_t wq_size;
uint32_t wq_tail;
uint32_t wq_head;
uint32_t unused; /* Was 'wq_head' */
/* GuC submission statistics & status */
uint64_t submissions[GUC_MAX_ENGINES_NUM];
uint32_t q_fail;
uint32_t b_fail;
int retcode;
int spare; /* pad to 32 DWords */
};
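A sketch of how these fields interact; the helper, the doorbell write and the 16-byte work-item size are all hypothetical simplifications (the real submission path lives in i915_guc_submission.c):

/* Illustrative only: assumes a power-of-two wq_size and uses the
 * kmap'd first page of the client object via client_base.
 */
static void example_guc_submit(struct i915_guc_client *client,
			       u32 doorbell_offset)
{
	u32 *doorbell = client->client_base + doorbell_offset;

	/* Advance the work-queue tail past an item just copied into the
	 * client object's later pages (the circular work queue). */
	client->wq_tail = (client->wq_tail + 16) & (client->wq_size - 1);

	/* The doorbell cacheline also lives in the first page; writing
	 * it sends an interrupt to the GuC ("rings the doorbell"). */
	WRITE_ONCE(*doorbell, 1);
}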
enum intel_guc_fw_status {


@ -1412,8 +1412,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
hdmi_to_dig_port(intel_hdmi));
}
if (!live_status)
DRM_DEBUG_KMS("Live status not up!");
if (!live_status) {
DRM_DEBUG_KMS("HDMI live status down\n");
/*
* The live status register is not reliable on all Intel platforms.
* Trust it only where it is known to work; everywhere else, read
* the EDID to determine whether a sink is present.
*/
if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
live_status = true;
}
intel_hdmi_unset_edid(connector);


@ -571,15 +571,14 @@ clear_err:
goto out;
timeout:
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
* instead. Use EAGAIN to have i2c core retry.
*/
bus->force_bit = 1;
ret = -EAGAIN;
out:
@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit)
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
else
if (ret < 0)
bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY;
} else {
ret = do_gmbus_xfer(adapter, msgs, num);
if (ret == -EAGAIN)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
mutex_unlock(&dev_priv->gmbus_mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
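With this change force_bit doubles as a reference count (low bits, maintained by intel_gmbus_force_bit() above) and a transient retry flag. A sketch of the lifecycle, assuming GMBUS_FORCE_BIT_RETRY is a flag bit kept clear of the refcount:

/* Hypothetical trace of the force_bit lifecycle:
 *
 * 1. do_gmbus_xfer() times out        -> force_bit |= GMBUS_FORCE_BIT_RETRY,
 *                                        return -EAGAIN (i2c core retries)
 * 2. retry sees force_bit != 0        -> transfer runs via i2c_bit_algo
 * 3. bit-banging fails too (ret < 0)  -> force_bit &= ~GMBUS_FORCE_BIT_RETRY,
 *                                        next attempt goes back to GMBUS
 * 4. explicit callers pin bit-banging -> force_bit += force_bit_arg ? 1 : -1
 */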
void intel_teardown_gmbus(struct drm_device *dev)


@ -229,9 +229,6 @@ enum {
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
struct drm_i915_gem_object *default_ctx_obj);
/**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@ -418,6 +415,7 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
struct drm_i915_gem_request *rq1)
{
struct drm_i915_private *dev_priv = rq0->i915;
unsigned int fw_domains = rq0->engine->fw_domains;
execlists_update_context(rq0);
@ -425,11 +423,11 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
execlists_update_context(rq1);
spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
execlists_elsp_write(rq0, rq1);
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
spin_unlock_irq(&dev_priv->uncore.lock);
}
@ -552,7 +550,7 @@ static void intel_lrc_irq_handler(unsigned long data)
unsigned int csb_read = 0, i;
unsigned int submit_contexts = 0;
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
@ -577,7 +575,7 @@ static void intel_lrc_irq_handler(unsigned long data)
_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
engine->next_context_status_buffer << 8));
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_uncore_forcewake_put(dev_priv, engine->fw_domains);
spin_lock(&engine->execlist_lock);
@ -892,17 +890,8 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
*/
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct drm_i915_private *dev_priv;
int ret;
WARN_ON(req == NULL);
dev_priv = req->i915;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
@ -1016,7 +1005,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
i915_gem_execbuffer_move_to_active(vmas, params->request);
i915_gem_execbuffer_retire_commands(params);
return 0;
}
@ -1057,7 +1045,7 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
return;
ret = intel_engine_idle(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);
@ -1093,8 +1081,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
struct page *lrc_state_page;
uint32_t *lrc_reg_state;
void *vaddr;
u32 *lrc_reg_state;
int ret;
WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
@ -1104,19 +1092,20 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
if (ret)
return ret;
lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
if (WARN_ON(!lrc_state_page)) {
ret = -ENODEV;
vaddr = i915_gem_object_pin_map(ctx_obj);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
}
lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
goto unpin_map;
ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, engine);
lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
@ -1127,6 +1116,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
return ret;
unpin_map:
i915_gem_object_unpin_map(ctx_obj);
unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj);
@ -1159,7 +1150,7 @@ void intel_lr_context_unpin(struct intel_context *ctx,
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
if (--ctx->engine[engine->id].pin_count == 0) {
kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
i915_gem_object_unpin_map(ctx_obj);
intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
ctx->engine[engine->id].lrc_vma = NULL;
@ -1579,14 +1570,22 @@ out:
return ret;
}
static void lrc_init_hws(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int next_context_status_buffer_hw;
lrc_setup_hardware_status_page(engine,
dev_priv->kernel_context->engine[engine->id].state);
lrc_init_hws(engine);
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask | engine->irq_keep_mask));
@ -1625,7 +1624,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
intel_engine_init_hangcheck(engine);
return 0;
return intel_mocs_init_engine(engine);
}
static int gen8_init_render_ring(struct intel_engine_cs *engine)
@ -1945,15 +1944,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
/* w/a: for post-sync ops following a GPGPU operation we
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
intel_logical_ring_emit(ringbuf,
(PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
@ -1961,7 +1963,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine));
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
/* We're thrashing one dword of HWS. */
intel_logical_ring_emit(ringbuf, 0);
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
intel_logical_ring_emit(ringbuf, MI_NOOP);
return intel_logical_ring_advance_and_submit(request);
}
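The new size passed to intel_logical_ring_begin() can be read off the emits; an inferred sketch of the dword layout (not authoritative PIPE_CONTROL documentation):

/* Inferred layout of the 8 dwords reserved above (WA_TAIL_DWORDS extra):
 *
 *   dw0  GFX_OP_PIPE_CONTROL(6)      6-dword PIPE_CONTROL variant
 *   dw1  flags                       GLOBAL_GTT_IVB | CS_STALL | ...
 *   dw2  hws_seqno_address(engine)   8-byte aligned (see BUILD_BUG_ON)
 *   dw3  0                           upper half of the 64-bit address
 *   dw4  seqno                       low half of the qword data write
 *   dw5  0                           high half: the "thrashed" HWS dword
 *   dw6  MI_USER_INTERRUPT
 *   dw7  MI_NOOP
 */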
@ -2048,7 +2053,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
i915_gem_batch_pool_fini(&engine->batch_pool);
if (engine->status_page.obj) {
kunmap(sg_page(engine->status_page.obj->pages->sgl));
i915_gem_object_unpin_map(engine->status_page.obj);
engine->status_page.obj = NULL;
}
@ -2086,10 +2091,30 @@ logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift)
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
}
static int
lrc_setup_hws(struct intel_engine_cs *engine,
struct drm_i915_gem_object *dctx_obj)
{
void *hws;
/* The HWSP is part of the default context object in LRC mode. */
engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
hws = i915_gem_object_pin_map(dctx_obj);
if (IS_ERR(hws))
return PTR_ERR(hws);
engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
engine->status_page.obj = dctx_obj;
return 0;
}
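Both assignments above reference the same PPHWSP page through two address spaces; a sketch of the arithmetic:

/* Two views of the same PPHWSP page inside the default context object:
 *
 *   GPU (GGTT):  gfx_addr  = i915_gem_obj_ggtt_offset(dctx_obj)
 *                          + LRC_PPHWSP_PN * PAGE_SIZE
 *   CPU (vmap):  page_addr = i915_gem_object_pin_map(dctx_obj)
 *                          + LRC_PPHWSP_PN * PAGE_SIZE
 *
 * Keeping the whole object pinned and mapped is what lets the driver
 * drop the old per-page kmap of the status page.
 */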
static int
logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
{
struct intel_context *dctx = to_i915(dev)->kernel_context;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_context *dctx = dev_priv->kernel_context;
enum forcewake_domains fw_domains;
int ret;
/* Intentionally left blank. */
@ -2111,6 +2136,20 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
logical_ring_init_platform_invariants(engine);
fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
RING_ELSP(engine),
FW_REG_WRITE);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
RING_CONTEXT_STATUS_PTR(engine),
FW_REG_READ | FW_REG_WRITE);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
RING_CONTEXT_STATUS_BUF_BASE(engine),
FW_REG_READ);
engine->fw_domains = fw_domains;
ret = i915_cmd_parser_init_ring(engine);
if (ret)
goto error;
@ -2128,6 +2167,13 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine)
goto error;
}
/* And setup the hardware status page. */
ret = lrc_setup_hws(engine, dctx->engine[engine->id].state);
if (ret) {
DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret);
goto error;
}
return 0;
error:
@ -2378,15 +2424,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static int
populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
populate_lr_context(struct intel_context *ctx,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ringbuffer *ringbuf)
{
struct drm_device *dev = engine->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
struct page *page;
uint32_t *reg_state;
void *vaddr;
u32 *reg_state;
int ret;
if (!ppgtt)
@ -2398,18 +2445,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
return ret;
}
ret = i915_gem_object_get_pages(ctx_obj);
if (ret) {
DRM_DEBUG_DRIVER("Could not get object pages\n");
vaddr = i915_gem_object_pin_map(ctx_obj);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
i915_gem_object_pin_pages(ctx_obj);
ctx_obj->dirty = true;
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
/* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
* commands followed by (reg, value) pairs. The values we are setting here are
@ -2514,8 +2560,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
make_rpcs(dev));
}
kunmap_atomic(reg_state);
i915_gem_object_unpin_pages(ctx_obj);
i915_gem_object_unpin_map(ctx_obj);
return 0;
}
@ -2542,6 +2587,7 @@ void intel_lr_context_free(struct intel_context *ctx)
if (ctx == ctx->i915->kernel_context) {
intel_unpin_ringbuffer_obj(ringbuf);
i915_gem_object_ggtt_unpin(ctx_obj);
i915_gem_object_unpin_map(ctx_obj);
}
WARN_ON(ctx->engine[i].pin_count);
@ -2588,24 +2634,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
return ret;
}
static void lrc_setup_hardware_status_page(struct intel_engine_cs *engine,
struct drm_i915_gem_object *default_ctx_obj)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
struct page *page;
/* The HWSP is part of the default context object in LRC mode. */
engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj)
+ LRC_PPHWSP_PN * PAGE_SIZE;
page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN);
engine->status_page.page_addr = kmap(page);
engine->status_page.obj = default_ctx_obj;
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
(u32)engine->status_page.gfx_addr);
POSTING_READ(RING_HWS_PGA(engine->mmio_base));
}
/**
* intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create.
@ -2669,13 +2697,12 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
}
ret = engine->init_context(req);
i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
i915_gem_request_cancel(req);
goto error_ringbuf;
}
i915_add_request_no_flush(req);
}
return 0;
@ -2688,10 +2715,9 @@ error_deref_obj:
return ret;
}
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx)
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
for_each_engine(engine, dev_priv) {
@ -2699,23 +2725,23 @@ void intel_lr_context_reset(struct drm_device *dev,
ctx->engine[engine->id].state;
struct intel_ringbuffer *ringbuf =
ctx->engine[engine->id].ringbuf;
void *vaddr;
uint32_t *reg_state;
struct page *page;
if (!ctx_obj)
continue;
if (i915_gem_object_get_pages(ctx_obj)) {
WARN(1, "Failed get_pages for context obj\n");
vaddr = i915_gem_object_pin_map(ctx_obj);
if (WARN_ON(IS_ERR(vaddr)))
continue;
}
page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
reg_state = kmap_atomic(page);
reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
ctx_obj->dirty = true;
reg_state[CTX_RING_HEAD+1] = 0;
reg_state[CTX_RING_TAIL+1] = 0;
kunmap_atomic(reg_state);
i915_gem_object_unpin_map(ctx_obj);
ringbuf->head = 0;
ringbuf->tail = 0;


@ -24,6 +24,8 @@
#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_
#include "intel_ringbuffer.h"
#define GEN8_LR_CONTEXT_ALIGN 4096
/* Execlists regs */
@ -34,6 +36,7 @@
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0)
@ -103,8 +106,11 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx);
struct drm_i915_private;
void intel_lr_context_reset(struct drm_i915_private *dev_priv,
struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *engine);


@ -128,9 +128,9 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
/**
* get_mocs_settings()
* @dev: DRM device.
* @dev_priv: i915 device.
* @table: Output table that will be made to point at appropriate
* MOCS values for the device.
* MOCS values for the device.
*
* This function will return the values of the MOCS table that needs to
* be programmed for the platform. It will return the values that need
@ -138,21 +138,21 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
*
* Return: true if there are applicable MOCS settings for the device.
*/
static bool get_mocs_settings(struct drm_device *dev,
static bool get_mocs_settings(struct drm_i915_private *dev_priv,
struct drm_i915_mocs_table *table)
{
bool result = false;
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
} else if (IS_BROXTON(dev)) {
} else if (IS_BROXTON(dev_priv)) {
table->size = ARRAY_SIZE(broxton_mocs_table);
table->table = broxton_mocs_table;
result = true;
} else {
WARN_ONCE(INTEL_INFO(dev)->gen >= 9,
WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
"Platform that should have a MOCS table does not.\n");
}
@ -178,11 +178,50 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
}
}
/**
* intel_mocs_init_engine() - program the MOCS control table
* @engine: The engine whose MOCS registers are to be programmed.
*
* This function programs the engine's MOCS control registers directly
* via MMIO, rather than emitting a MI_LOAD_REGISTER_IMM into a ring.
*
* Return: 0 on success, otherwise the error status.
*/
int intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = to_i915(engine->dev);
struct drm_i915_mocs_table table;
unsigned int index;
if (!get_mocs_settings(dev_priv, &table))
return 0;
if (WARN_ON(table.size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
for (index = 0; index < table.size; index++)
I915_WRITE(mocs_register(engine->id, index),
table.table[index].control_value);
/*
* Ok, now set the unused entries to uncached. These entries
* are officially undefined and no contract for the contents
* and settings is given for these entries.
*
* Entry 0 in the table is uncached - so we are just writing
* that value to all the unused entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++)
I915_WRITE(mocs_register(engine->id, index),
table.table[0].control_value);
return 0;
}
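A worked instance of the padding rule above (the 3-entry table size and the 62-entry total are assumptions based on the SKL/BXT tables of this period):

/* Assuming table.size == 3 and GEN9_NUM_MOCS_ENTRIES == 62:
 *
 *   index 0..2   written with table.table[index].control_value
 *   index 3..61  written with table.table[0].control_value (uncached)
 *
 * so every architecturally undefined entry ends up uncached instead of
 * inheriting whatever the hardware or firmware left behind.
 */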
/**
* emit_mocs_control_table() - emit the mocs control table
* @req: Request to set up the MOCS table for.
* @table: The values to program into the control regs.
* @ring: The engine for whom to emit the registers.
*
* This function simply emits a MI_LOAD_REGISTER_IMM command for the
* given table starting at the given address.
@ -190,10 +229,10 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
* Return: 0 on success, otherwise the error status.
*/
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table,
enum intel_engine_id ring)
const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
int ret;
@ -210,7 +249,8 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
for (index = 0; index < table->size; index++) {
intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit_reg(ringbuf,
mocs_register(engine, index));
intel_logical_ring_emit(ringbuf,
table->table[index].control_value);
}
@ -224,8 +264,10 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the unused entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
intel_logical_ring_emit_reg(ringbuf, mocs_register(ring, index));
intel_logical_ring_emit(ringbuf, table->table[0].control_value);
intel_logical_ring_emit_reg(ringbuf,
mocs_register(engine, index));
intel_logical_ring_emit(ringbuf,
table->table[0].control_value);
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@ -234,6 +276,14 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
return 0;
}
static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
u16 low,
u16 high)
{
return table->table[low].l3cc_value |
table->table[high].l3cc_value << 16;
}
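Since each l3cc value is 16 bits wide, two table entries share one 32-bit GEN9_LNCFCMOCS register; a worked example with made-up entry values:

/* Made-up entry values:
 *
 *   table->table[2].l3cc_value = 0x0030
 *   table->table[3].l3cc_value = 0x005d
 *
 *   l3cc_combine(table, 2, 3) == 0x0030 | (0x005d << 16) == 0x005d0030
 *
 * The even entry fills the low half and the odd entry the high half,
 * matching the l3cc_combine(table, 2*i, 2*i+1) calls below; an odd
 * table size pairs the leftover entry with entry 0 (uncached).
 */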
/**
* emit_mocs_l3cc_table() - emit the MOCS l3cc table
* @req: Request to set up the MOCS table for.
@ -249,11 +299,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
struct intel_ringbuffer *ringbuf = req->ringbuf;
unsigned int count;
unsigned int i;
u32 value;
u32 filler = (table->table[0].l3cc_value & 0xffff) |
((table->table[0].l3cc_value & 0xffff) << 16);
int ret;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
@ -268,20 +314,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
for (i = 0, count = 0; i < table->size / 2; i++, count += 2) {
value = (table->table[count].l3cc_value & 0xffff) |
((table->table[count + 1].l3cc_value & 0xffff) << 16);
for (i = 0; i < table->size/2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value);
intel_logical_ring_emit(ringbuf,
l3cc_combine(table, 2*i, 2*i+1));
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
value = (table->table[count].l3cc_value & 0xffff) |
((table->table[0].l3cc_value & 0xffff) << 16);
} else
value = filler;
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
i++;
}
/*
* Now set the rest of the table to uncached - use entry 0 as
@ -290,9 +334,7 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
intel_logical_ring_emit(ringbuf, value);
value = filler;
intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
}
intel_logical_ring_emit(ringbuf, MI_NOOP);
@ -301,6 +343,47 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
return 0;
}
/**
* intel_mocs_init_l3cc_table() - program the MOCS l3cc table
* @dev: The device to be programmed.
*
* This function simply programs the mocs registers for the given table
* starting at the given address. This register set is programmed in pairs.
*
* These registers may get programmed more than once; it is simpler to
* re-program all 32 registers than to track when each was last written.
* We always reprogram with the same values, and only on context start.
*
* Return: Nothing.
*/
void intel_mocs_init_l3cc_table(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_mocs_table table;
unsigned int i;
if (!get_mocs_settings(dev_priv, &table))
return;
for (i = 0; i < table.size/2; i++)
I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 2*i+1));
/* Odd table size - 1 left over */
if (table.size & 0x01) {
I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 2*i, 0));
i++;
}
/*
* Now set the rest of the table to uncached - use entry 0 as
* this will be uncached. Leave the last pair untouched, as those
* entries are reserved by the hardware.
*/
for (; i < (GEN9_NUM_MOCS_ENTRIES / 2); i++)
I915_WRITE(GEN9_LNCFCMOCS(i), l3cc_combine(&table, 0, 0));
}
/**
* intel_rcs_context_init_mocs() - program the MOCS register.
* @req: Request to set up the MOCS tables for.
@ -322,17 +405,11 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req)
struct drm_i915_mocs_table t;
int ret;
if (get_mocs_settings(req->engine->dev, &t)) {
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
/* Program the control registers */
for_each_engine_id(engine, dev_priv, id) {
ret = emit_mocs_control_table(req, &t, id);
if (ret)
return ret;
}
if (get_mocs_settings(req->i915, &t)) {
/* Program the RCS control registers */
ret = emit_mocs_control_table(req, &t);
if (ret)
return ret;
/* Now program the l3cc registers */
ret = emit_mocs_l3cc_table(req, &t);


@ -53,5 +53,7 @@
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
void intel_mocs_init_l3cc_table(struct drm_device *dev);
int intel_mocs_init_engine(struct intel_engine_cs *ring);
#endif


@ -34,12 +34,6 @@
#include "i915_drv.h"
#include "intel_drv.h"
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define PCI_SWSCI 0xe8
#define PCI_SWSCI_SCISEL (1 << 15)
#define PCI_SWSCI_GSSCIE (1 << 0)
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
@ -246,13 +240,12 @@ struct opregion_asle_ext {
#define MAX_DSLP 1500
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
u32 main_function, sub_function, scic;
u16 pci_swsci;
u16 swsci_val;
u32 dslp;
if (!swsci)
@ -300,16 +293,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
swsci->scic = scic;
/* Ensure SCI event is selected and event trigger is cleared. */
pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
pci_swsci |= PCI_SWSCI_SCISEL;
pci_swsci &= ~PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
pci_read_config_word(dev->pdev, SWSCI, &swsci_val);
if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
swsci_val |= SWSCI_SCISEL;
swsci_val &= ~SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, SWSCI, swsci_val);
}
/* Use event trigger to tell bios to check the mail. */
pci_swsci |= PCI_SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
swsci_val |= SWSCI_GSSCIE;
pci_write_config_word(dev->pdev, SWSCI, swsci_val);
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@ -905,9 +898,6 @@ static void swsci_setup(struct drm_device *dev)
opregion->swsci_gbda_sub_functions,
opregion->swsci_sbcb_sub_functions);
}
#else /* CONFIG_ACPI */
static inline void swsci_setup(struct drm_device *dev) {}
#endif /* CONFIG_ACPI */
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
{
@ -943,16 +933,14 @@ int intel_opregion_setup(struct drm_device *dev)
BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
pci_read_config_dword(dev->pdev, ASLS, &asls);
DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
if (asls == 0) {
DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
#ifdef CONFIG_ACPI
INIT_WORK(&opregion->asle_work, asle_work);
#endif
base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB);
if (!base)
@ -1024,3 +1012,31 @@ err_out:
memunmap(base);
return err;
}
int
intel_opregion_get_panel_type(struct drm_device *dev)
{
u32 panel_details;
int ret;
ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
ret);
return ret;
}
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
DRM_DEBUG_KMS("No panel type in OpRegion\n");
return -ENODEV;
}
return ret - 1;
}
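A worked decode of the value handled above (the panel_details value is hypothetical):

/* Example: SWSCI_GBDA_PANEL_DETAILS yields panel_details == 0x0300.
 *
 *   (0x0300 >> 8) & 0xff == 3    OpRegion panel type, valid range 1..0x10
 *   return 3 - 1         == 2    zero-based index for the caller
 *
 * A type of 0 means the OpRegion carries no panel type (-ENODEV above),
 * and anything above 0x10 is rejected with -EINVAL.
 */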


@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
ret = intel_ring_begin(req, 4);
if (ret) {
i915_gem_request_cancel(req);
i915_add_request_no_flush(req);
return ret;
}
@ -290,7 +290,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
ret = intel_ring_begin(req, 2);
if (ret) {
i915_gem_request_cancel(req);
i915_add_request_no_flush(req);
return ret;
}
@ -356,7 +356,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
ret = intel_ring_begin(req, 6);
if (ret) {
i915_gem_request_cancel(req);
i915_add_request_no_flush(req);
return ret;
}
@ -431,7 +431,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
ret = intel_ring_begin(req, 2);
if (ret) {
i915_gem_request_cancel(req);
i915_add_request_no_flush(req);
return ret;
}


@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
if (panel->backlight.combination_mode) {
u8 lbpc;
pci_read_config_byte(dev_priv->dev->pdev, PCI_LBPC, &lbpc);
pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
val *= lbpc;
}
@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
lbpc = level * 0xfe / panel->backlight.max + 1;
level /= lbpc;
pci_write_config_byte(dev_priv->dev->pdev, PCI_LBPC, lbpc);
pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
}
if (IS_GEN4(dev_priv)) {


@ -2483,7 +2483,7 @@ static void ilk_wm_merge(struct drm_device *dev,
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
config->num_pipes_active > 1)
return;
last_enabled_level = 0;
/* ILK: FBC WM must be disabled always */
merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
@ -4587,7 +4587,7 @@ void intel_set_rps(struct drm_device *dev, u8 val)
gen6_set_rps(dev, val);
}
static void gen9_disable_rps(struct drm_device *dev)
static void gen9_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -4595,12 +4595,20 @@ static void gen9_disable_rps(struct drm_device *dev)
I915_WRITE(GEN9_PG_ENABLE, 0);
}
static void gen9_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RP_CONTROL, 0);
}
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_RP_CONTROL, 0);
}
static void cherryview_disable_rps(struct drm_device *dev)
@ -4804,6 +4812,16 @@ static void gen9_enable_rps(struct drm_device *dev)
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
/*
* The BIOS can leave HW Turbo enabled, so explicitly clear the
* control register to avoid an inconsistency with the debugfs
* interface, which would otherwise report Turbo as enabled even
* though WaGsvDisableTurbo is in effect. Beyond that there is no
* harm in leaving Turbo enabled in the control register, as the
* Up/Down interrupts remain masked.
*/
gen9_disable_rps(dev);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
@ -4997,7 +5015,8 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_STATE, 0);
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
gtfifodbg = I915_READ(GTFIFODBG);
if (gtfifodbg) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
@ -5528,7 +5547,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
gtfifodbg = I915_READ(GTFIFODBG);
gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
GT_FIFO_FREE_ENTRIES_CHV);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
@ -5627,7 +5647,8 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_check_pctx(dev_priv);
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
gtfifodbg = I915_READ(GTFIFODBG);
if (gtfifodbg) {
DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
@ -6265,9 +6286,10 @@ void intel_disable_gt_powersave(struct drm_device *dev)
intel_suspend_gt_powersave(dev);
mutex_lock(&dev_priv->rps.hw_lock);
if (INTEL_INFO(dev)->gen >= 9)
if (INTEL_INFO(dev)->gen >= 9) {
gen9_disable_rc6(dev);
gen9_disable_rps(dev);
else if (IS_CHERRYVIEW(dev))
} else if (IS_CHERRYVIEW(dev))
cherryview_disable_rps(dev);
else if (IS_VALLEYVIEW(dev))
valleyview_disable_rps(dev);
@ -6882,23 +6904,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
gen6_check_mch_setup(dev);
}
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
vlv_init_display_clock_gating(dev_priv);
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
@ -6981,8 +6990,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
vlv_init_display_clock_gating(dev_priv);
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
I915_WRITE(GEN7_FF_THREAD_MODE,


@ -959,9 +959,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
}
/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX |
GEN9_ENABLE_GPGPU_PREEMPTION);
/* Wa4x4STCOptimizationDisable:skl,bxt */
/* WaDisablePartialResolveInVc:skl,bxt */
@ -980,7 +981,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@ -1097,7 +1098,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
/* This is tied to WaForceContextSaveRestoreNonCoherent */
if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
/*
* Use Force Non-Coherent whenever executing a 3D context. This
* is a workaround for a possible hang in the unlikely event
@ -2086,6 +2088,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
i915_gem_object_unpin_map(ringbuf->obj);
else
iounmap(ringbuf->virtual_start);
ringbuf->virtual_start = NULL;
ringbuf->vma = NULL;
i915_gem_object_ggtt_unpin(ringbuf->obj);
}
@ -2096,10 +2099,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_i915_gem_object *obj = ringbuf->obj;
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
unsigned flags = PIN_OFFSET_BIAS | 4096;
void *addr;
int ret;
if (HAS_LLC(dev_priv) && !obj->stolen) {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
if (ret)
return ret;
@ -2107,13 +2113,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
if (ret)
goto err_unpin;
ringbuf->virtual_start = i915_gem_object_pin_map(obj);
if (ringbuf->virtual_start == NULL) {
ret = -ENOMEM;
addr = i915_gem_object_pin_map(obj);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_unpin;
}
} else {
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
flags | PIN_MAPPABLE);
if (ret)
return ret;
@ -2124,14 +2131,15 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
addr = ioremap_wc(ggtt->mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
}
}
ringbuf->virtual_start = addr;
ringbuf->vma = i915_gem_obj_to_ggtt(obj);
return 0;
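The flags choice above deserves a note; my reading of PIN_OFFSET_BIAS (an assumption, not spelled out in this diff) is a minimum-offset constraint:

/* PIN_OFFSET_BIAS | 4096, as used above, constrains the binding to
 *
 *   i915_gem_obj_ggtt_offset(ringbuf->obj) >= 4096
 *
 * so no ring is ever placed at GGTT offset 0, where wrapping the ring
 * back to the start has been observed to hang (per the comment above).
 */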
@ -2363,8 +2371,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
/* Make sure we do not trigger any retires */
return __i915_wait_request(req,
atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter),
to_i915(engine->dev)->mm.interruptible,
req->i915->mm.interruptible,
NULL, NULL);
}
@ -2486,19 +2493,9 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
int intel_ring_begin(struct drm_i915_gem_request *req,
int num_dwords)
{
struct intel_engine_cs *engine;
struct drm_i915_private *dev_priv;
struct intel_engine_cs *engine = req->engine;
int ret;
WARN_ON(req == NULL);
engine = req->engine;
dev_priv = req->i915;
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
if (ret)
return ret;
@ -3189,7 +3186,7 @@ intel_stop_engine(struct intel_engine_cs *engine)
return;
ret = intel_engine_idle(engine);
if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error))
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
engine->name, ret);


@ -270,6 +270,7 @@ struct intel_engine_cs {
spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
unsigned int fw_domains;
unsigned int next_context_status_buffer;
unsigned int idle_lite_restore_wa;
bool disable_lite_restore_wa;


@ -397,11 +397,6 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
SKL_DISPLAY_DC_OFF_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
@ -419,39 +414,21 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_TRANSCODER_DSI_A) | \
BIT(POWER_DOMAIN_TRANSCODER_DSI_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
BIT(POWER_DOMAIN_MODESET) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS)) | \
BIT(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
WARN(!IS_BROXTON(dev), "Platform doesn't support DC9.\n");
WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
"DC9 already programmed to be enabled.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled to enable DC9.\n");
WARN_ONCE(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@ -464,9 +441,10 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n");
WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled.\n");
WARN_ONCE(intel_irqs_enabled(dev_priv),
"Interrupts not disabled yet.\n");
WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
"DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@ -514,10 +492,9 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
state, rewrites);
}
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
uint32_t val;
uint32_t mask;
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
if (IS_BROXTON(dev_priv))
@ -525,10 +502,30 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
else
mask |= DC_STATE_EN_UPTO_DC6;
return mask;
}
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
dev_priv->csr.dc_state, val);
dev_priv->csr.dc_state = val;
}
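Spelling out the two possible masks may help when reading the sanitize and set paths (derived from the branches above):

/* Sketch of the two gen9_dc_mask() results, per the branches above:
 *
 *   SKL/KBL: DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_UPTO_DC6
 *   BXT:     DC_STATE_EN_UPTO_DC5 | DC_STATE_EN_DC9
 *
 * gen9_sanitize_dc_state() masks DC_STATE_EN with this and adopts the
 * result as csr.dc_state, so software tracking restarts from whatever
 * the hardware (e.g. the DMC across a system resume) left behind.
 */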
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
uint32_t mask;
if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
val = I915_READ(DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
@ -573,13 +570,9 @@ static void assert_csr_loaded(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2);
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC5.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
@ -589,7 +582,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
@ -600,11 +593,6 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
"Platform doesn't support DC6.\n");
WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n");
WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
@ -630,6 +618,45 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
static void
gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
enum skl_disp_power_wells power_well_id = power_well->data;
u32 val;
u32 mask;
mask = SKL_POWER_WELL_REQ(power_well_id);
val = I915_READ(HSW_PWR_WELL_KVMR);
if (WARN_ONCE(val & mask, "Clearing unexpected KVMR request for %s\n",
power_well->name))
I915_WRITE(HSW_PWR_WELL_KVMR, val & ~mask);
val = I915_READ(HSW_PWR_WELL_BIOS);
val |= I915_READ(HSW_PWR_WELL_DEBUG);
if (!(val & mask))
return;
/*
* DMC is known to force on the request bits for power well 1 on SKL
* and BXT and the misc IO power well on SKL but we don't expect any
* other request bits to be set, so WARN for those.
*/
if (power_well_id == SKL_DISP_PW_1 ||
((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
power_well_id == SKL_DISP_PW_MISC_IO))
DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
"by DMC\n", power_well->name);
else
WARN_ONCE(1, "Clearing unexpected auxiliary requests for %s\n",
power_well->name);
I915_WRITE(HSW_PWR_WELL_BIOS, val & ~mask);
I915_WRITE(HSW_PWR_WELL_DEBUG, val & ~mask);
}
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
@ -684,10 +711,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling %s\n", power_well->name);
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
state_mask), 1))
DRM_ERROR("%s enable timeout\n",
power_well->name);
check_fuse_status = true;
}
} else {
@ -696,8 +719,16 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
}
if (IS_GEN9(dev_priv))
gen9_sanitize_power_well_requests(dev_priv, power_well);
}
if (wait_for(!!(I915_READ(HSW_PWR_WELL_DRIVER) & state_mask) == enable,
1))
DRM_ERROR("%s %s timeout\n",
power_well->name, enable ? "enable" : "disable");
if (check_fuse_status) {
if (power_well->data == SKL_DISP_PW_1) {
if (wait_for((I915_READ(SKL_FUSE_STATUS) &
@ -779,11 +810,19 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
if (IS_BROXTON(dev_priv)) {
broxton_cdclk_verify_state(dev_priv);
broxton_ddi_phy_verify_state(dev_priv);
}
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
if (!dev_priv->csr.dmc_payload)
return;
if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6)
skl_enable_dc6(dev_priv);
else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)
@ -900,6 +939,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
return enabled;
}
static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
I915_WRITE(CBR1_VLV, 0);
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@ -922,6 +972,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
I915_WRITE(DPLL(pipe), val);
}
vlv_init_display_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
valleyview_enable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@ -1560,34 +1612,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
}
#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
#define HSW_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_PLLS) | \
BIT(POWER_DOMAIN_AUX_A) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
HSW_ALWAYS_ON_POWER_DOMAINS | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
#define BDW_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
#define VLV_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_PORT_CRT) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
@ -1617,6 +1691,28 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DISPLAY_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PIPE_A) | \
BIT(POWER_DOMAIN_PIPE_B) | \
BIT(POWER_DOMAIN_PIPE_C) | \
BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
BIT(POWER_DOMAIN_TRANSCODER_A) | \
BIT(POWER_DOMAIN_TRANSCODER_B) | \
BIT(POWER_DOMAIN_TRANSCODER_C) | \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
BIT(POWER_DOMAIN_PORT_DSI) | \
BIT(POWER_DOMAIN_VGA) | \
BIT(POWER_DOMAIN_AUDIO) | \
BIT(POWER_DOMAIN_AUX_B) | \
BIT(POWER_DOMAIN_AUX_C) | \
BIT(POWER_DOMAIN_AUX_D) | \
BIT(POWER_DOMAIN_GMBUS) | \
BIT(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
@ -1684,7 +1780,7 @@ static struct i915_power_well hsw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@ -1698,7 +1794,7 @@ static struct i915_power_well bdw_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@ -1733,7 +1829,7 @@ static struct i915_power_well vlv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = PUNIT_POWER_WELL_ALWAYS_ON,
},
@ -1791,7 +1887,7 @@ static struct i915_power_well chv_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
@ -1801,7 +1897,7 @@ static struct i915_power_well chv_power_wells[] = {
* power wells don't actually exist. Pipe A power well is
* required for any pipe to work.
*/
.domains = VLV_DISPLAY_POWER_DOMAINS,
.domains = CHV_DISPLAY_POWER_DOMAINS,
.data = PIPE_A,
.ops = &chv_pipe_power_well_ops,
},
@ -1835,7 +1931,7 @@ static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.data = SKL_DISP_PW_ALWAYS_ON,
},
@ -1891,44 +1987,16 @@ static struct i915_power_well skl_power_wells[] = {
},
};
void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_enable(dev_priv, well);
}
void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
{
struct i915_power_well *well;
if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
return;
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_disable(dev_priv, well);
}
static struct i915_power_well bxt_power_wells[] = {
{
.name = "always-on",
.always_on = 1,
.domains = BXT_DISPLAY_ALWAYS_ON_POWER_DOMAINS,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
},
{
.name = "power well 1",
.domains = BXT_DISPLAY_POWERWELL_1_POWER_DOMAINS,
.domains = 0,
.ops = &skl_power_well_ops,
.data = SKL_DISP_PW_1,
},
@ -1953,11 +2021,6 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
if (disable_power_well >= 0)
return !!disable_power_well;
if (IS_BROXTON(dev_priv)) {
DRM_DEBUG_KMS("Disabling display power well support\n");
return 0;
}
return 1;
}
@ -2109,9 +2172,10 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -2122,7 +2186,13 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
skl_pw1_misc_io_init(dev_priv);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
if (!resume)
@ -2137,6 +2207,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@ -2144,8 +2215,73 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
skl_pw1_misc_io_fini(dev_priv);
well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
intel_power_well_disable(dev_priv, well);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
}
void bxt_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
* or else the reset will hang because there is no PCH to respond.
* Move the handshake programming to the initialization sequence;
* previously it was left up to the BIOS.
*/
val = I915_READ(HSW_NDE_RSTWRN_OPT);
val &= ~RESET_PCH_HANDSHAKE_ENABLE;
I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
/* Enable PG1 */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_enable(dev_priv, well);
mutex_unlock(&power_domains->lock);
broxton_init_cdclk(dev_priv);
broxton_ddi_phy_init(dev_priv);
broxton_cdclk_verify_state(dev_priv);
broxton_ddi_phy_verify_state(dev_priv);
if (resume && dev_priv->csr.dmc_payload)
intel_csr_load_program(dev_priv);
}
void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
broxton_ddi_phy_uninit(dev_priv);
broxton_uninit_cdclk(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* Disable PG1 */
mutex_lock(&power_domains->lock);
well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
}
@ -2280,6 +2416,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_BROXTON(dev)) {
bxt_display_core_init(dev_priv, resume);
} else if (IS_CHERRYVIEW(dev)) {
mutex_lock(&power_domains->lock);
chv_phy_control_init(dev_priv);
@ -2317,6 +2455,8 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
skl_display_core_uninit(dev_priv);
else if (IS_BROXTON(dev_priv))
bxt_display_core_uninit(dev_priv);
}
/**

View File

@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
mod_timer_pinned(&d->timer, jiffies + 1);
d->wake_count++;
hrtimer_start_range_ns(&d->timer,
ktime_set(0, NSEC_PER_MSEC),
NSEC_PER_MSEC,
HRTIMER_MODE_REL);
}
static inline void
@ -107,22 +111,22 @@ static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_wait_ack_clear(d);
fw_domain_get(d);
fw_domain_wait_ack(d);
}
for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_wait_ack(d);
}
static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
for_each_fw_domain_masked(d, fw_domains, dev_priv) {
fw_domain_put(d);
fw_domain_posting_read(d);
}
@ -132,10 +136,9 @@ static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
/* No need to do this for every domain; the first one found is enough */
for_each_fw_domain(d, dev_priv, id) {
for_each_fw_domain(d, dev_priv) {
fw_domain_posting_read(d);
break;
}
@ -145,12 +148,11 @@ static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
enum forcewake_domain_id id;
if (dev_priv->uncore.fw_domains == 0)
return;
for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_reset(d);
fw_domains_posting_read(dev_priv);
@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret;
}
static void intel_uncore_fw_release_timer(unsigned long arg)
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain = (void *)arg;
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
unsigned long irqflags;
assert_rpm_device_not_suspended(domain->i915);
@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg)
1 << domain->id);
spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
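The two hunks above convert the forcewake release path from a pinned legacy timer to a one-shot hrtimer: arm a relative timer roughly 1ms out with 1ms of slack, recover the containing object in the callback via container_of(), and return HRTIMER_NORESTART. A self-contained sketch of that pattern, with illustrative names that are not part of i915:

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct demo {
	struct hrtimer timer;
};

static enum hrtimer_restart demo_release(struct hrtimer *timer)
{
	struct demo *d = container_of(timer, struct demo, timer);

	/* release work would go here; 'd' is the object owning the timer */
	(void)d;
	return HRTIMER_NORESTART;	/* one-shot, like the fw release timer */
}

static void demo_arm(struct demo *d)
{
	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = demo_release;
	/* fire ~1ms from now, allowing up to 1ms of slack for coalescing */
	hrtimer_start_range_ns(&d->timer, ktime_set(0, NSEC_PER_MSEC),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}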
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
int retry_count = 100;
enum forcewake_domain_id id;
enum forcewake_domains fw = 0, active_domains;
/* Hold uncore.lock across reset to prevent any register access
@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
while (1) {
active_domains = 0;
for_each_fw_domain(domain, dev_priv, id) {
if (del_timer_sync(&domain->timer) == 0)
for_each_fw_domain(domain, dev_priv) {
if (hrtimer_cancel(&domain->timer) == 0)
continue;
intel_uncore_fw_release_timer((unsigned long)domain);
intel_uncore_fw_release_timer(&domain->timer);
}
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
for_each_fw_domain(domain, dev_priv, id) {
if (timer_pending(&domain->timer))
active_domains |= (1 << id);
for_each_fw_domain(domain, dev_priv) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
if (active_domains == 0)
@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
WARN_ON(active_domains);
for_each_fw_domain(domain, dev_priv, id)
for_each_fw_domain(domain, dev_priv)
if (domain->wake_count)
fw |= 1 << id;
fw |= domain->mask;
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
@ -310,21 +315,49 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void intel_uncore_ellc_detect(struct drm_device *dev)
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
const unsigned int sets[4] = { 1, 1, 2, 2 };
const u32 cap = dev_priv->edram_cap;
if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
INTEL_INFO(dev)->gen >= 9) &&
(__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
/* The docs do not explain exactly how the calculation can be
* made. It is somewhat guessable, but for now, it's always
* 128MB.
* NB: We can't write IDICR yet because we do not have gt funcs
return EDRAM_NUM_BANKS(cap) *
ways[EDRAM_WAYS_IDX(cap)] *
sets[EDRAM_SETS_IDX(cap)] *
1024 * 1024;
}
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
if (!HAS_EDRAM(dev_priv))
return 0;
/* The capability bits needed for the size calculation
* are not present before gen9, so always return 128MB.
*/
if (INTEL_GEN(dev_priv) < 9)
return 128 * 1024 * 1024;
return gen9_edram_size(dev_priv);
}
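To make the gen9 arithmetic concrete, here is a hedged worked example; the cap field values are hypothetical, chosen only to exercise the lookup tables:

/* Suppose EDRAM_NUM_BANKS(cap) == 2, EDRAM_WAYS_IDX(cap) == 1 and
 * EDRAM_SETS_IDX(cap) == 2; then the size works out to
 * 2 banks * ways[1] (8) * sets[2] (2) * 1 MiB = 32 MiB.
 */
static u64 edram_size_example(void)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };

	return 2ULL * ways[1] * sets[2] * 1024 * 1024;	/* 32 MiB */
}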
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv) ||
INTEL_GEN(dev_priv) >= 9) {
dev_priv->edram_cap = __raw_i915_read32(dev_priv,
HSW_EDRAM_CAP);
/* NB: We can't write IDICR yet because we do not have gt funcs
* set up */
dev_priv->ellc_size = 128;
DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
} else {
dev_priv->edram_cap = 0;
}
if (HAS_EDRAM(dev_priv))
DRM_INFO("Found %lluMB of eDRAM\n",
intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
@ -410,16 +443,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count++)
fw_domains &= ~(1 << id);
fw_domains &= ~domain->mask;
}
if (fw_domains)
@ -477,21 +509,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_put)
return;
fw_domains &= dev_priv->uncore.fw_domains;
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (WARN_ON(domain->wake_count == 0))
continue;
if (--domain->wake_count)
continue;
domain->wake_count++;
fw_domain_arm_timer(domain);
}
}
@ -539,18 +569,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
if (!dev_priv->uncore.funcs.force_wake_get)
return;
for_each_fw_domain(domain, dev_priv, id)
for_each_fw_domain(domain, dev_priv)
WARN_ON(domain->wake_count);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
#define __gen6_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset)) \
__fwd = FORCEWAKE_RENDER; \
else \
__fwd = 0; \
__fwd; \
})
#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))
#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
@ -564,6 +603,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x22000, 0x24000) || \
REG_RANGE((reg), 0x30000, 0x40000))
#define __vlv_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (!NEEDS_FORCE_WAKE(offset)) \
__fwd = 0; \
else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER; \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_MEDIA; \
__fwd; \
})
static const i915_reg_t gen8_shadowed_regs[] = {
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
/* TODO: Other registers are not yet used */
};
static bool is_gen8_shadowed(u32 offset)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
if (offset == gen8_shadowed_regs[i].reg)
return true;
return false;
}
#define __gen8_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
__fwd = FORCEWAKE_RENDER; \
else \
__fwd = 0; \
__fwd; \
})
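Because the ring tail registers sit in the shadowed table above, the gen8 write classifier should resolve them to no forcewake domain at all. A small sanity sketch of that property:

/* RING_TAIL(RENDER_RING_BASE) is listed in gen8_shadowed_regs, so a
 * write to it needs no forcewake despite being below 0x40000.
 */
WARN_ON(__gen8_reg_write_fw_domains(
		i915_mmio_reg_offset(RING_TAIL(RENDER_RING_BASE))) != 0);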
#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
(REG_RANGE((reg), 0x2000, 0x4000) || \
REG_RANGE((reg), 0x5200, 0x8000) || \
@ -586,6 +667,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
REG_RANGE((reg), 0x9000, 0xB000) || \
REG_RANGE((reg), 0xF000, 0x10000))
#define __chv_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (!NEEDS_FORCE_WAKE(offset)) \
__fwd = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
__fwd; \
})
#define __chv_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \
__fwd = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
__fwd; \
})
#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
REG_RANGE((reg), 0xB00, 0x2000)
@ -618,6 +727,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
!FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
!FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
#define SKL_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
#define __gen9_reg_read_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
__fwd = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
__fwd = FORCEWAKE_BLITTER; \
__fwd; \
})
static const i915_reg_t gen9_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
/* TODO: Other registers are not yet used */
};
static bool is_gen9_shadowed(u32 offset)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
if (offset == gen9_shadowed_regs[i].reg)
return true;
return false;
}
#define __gen9_reg_write_fw_domains(offset) \
({ \
enum forcewake_domains __fwd; \
if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \
__fwd = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
__fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
__fwd = FORCEWAKE_BLITTER; \
__fwd; \
})
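Taken together, SKL_NEEDS_FORCE_WAKE and the range tests classify every offset into zero, one or two domains. A brief illustration; the render-range assumption for 0x2000 is ours, while the 0x40000 cutoff follows directly from SKL_NEEDS_FORCE_WAKE above:

enum forcewake_domains fw_none = __gen9_reg_read_fw_domains(0x40000);
					/* == 0: at/above the cutoff */
enum forcewake_domains fw_rdr  = __gen9_reg_read_fw_domains(0x2000);
					/* FORCEWAKE_RENDER, assuming the
					 * render range covers the render
					 * ring registers at 0x2000 */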
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@ -633,15 +797,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
const bool read,
const bool before)
{
/* XXX. We limit the auto arming traces for mmio
* debugs on these platforms. There are just too many
* revealed by these and CI/Bat suffers from the noise.
* Please fix and then re-enable the automatic traces.
*/
if (i915.mmio_debug < 2 &&
(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
return;
if (WARN(check_for_unclaimed_mmio(dev_priv),
"Unclaimed register detected %s %s register 0x%x\n",
before ? "before" : "after",
@ -720,19 +875,17 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
enum forcewake_domain_id id;
if (WARN_ON(!fw_domains))
return;
/* Ideally GCC would constant-fold and eliminate this loop */
for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
if (domain->wake_count) {
fw_domains &= ~(1 << id);
fw_domains &= ~domain->mask;
continue;
}
domain->wake_count++;
fw_domain_arm_timer(domain);
}
@ -743,9 +896,11 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
if (NEEDS_FORCE_WAKE(offset)) \
__force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
fw_engine = __gen6_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
@ -753,14 +908,9 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine = 0; \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
if (!NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
fw_engine = __vlv_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
@ -770,40 +920,21 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine = 0; \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
if (!NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
fw_engine = __chv_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
#define SKL_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
if (!SKL_NEEDS_FORCE_WAKE(offset)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
fw_engine = __gen9_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
@ -942,34 +1073,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
GEN6_WRITE_FOOTER; \
}
static const i915_reg_t gen8_shadowed_regs[] = {
FORCEWAKE_MT,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
/* TODO: Other registers are not yet used */
};
static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i]))
return true;
return false;
}
#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
__force_wake_auto(dev_priv, FORCEWAKE_RENDER); \
fw_engine = __gen8_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
@ -977,64 +1088,22 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine = 0; \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
if (!NEEDS_FORCE_WAKE(offset) || \
is_gen8_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
fw_engine = __chv_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
static const i915_reg_t gen9_shadowed_regs[] = {
RING_TAIL(RENDER_RING_BASE),
RING_TAIL(GEN6_BSD_RING_BASE),
RING_TAIL(VEBOX_RING_BASE),
RING_TAIL(BLT_RING_BASE),
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_MEDIA_GEN9,
GEN6_RPNSWREQ,
GEN6_RC_VIDEO_FREQ,
/* TODO: Other registers are not yet used */
};
static bool is_gen9_shadowed(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i]))
return true;
return false;
}
#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
if (!SKL_NEEDS_FORCE_WAKE(offset) || \
is_gen9_shadowed(dev_priv, reg)) \
fw_engine = 0; \
else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER; \
else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_MEDIA; \
else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
else \
fw_engine = FORCEWAKE_BLITTER; \
fw_engine = __gen9_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
@ -1150,7 +1219,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
d->i915 = dev_priv;
d->id = domain_id;
setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
d->mask = 1 << domain_id;
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
dev_priv->uncore.fw_domains |= (1 << domain_id);
@ -1189,7 +1265,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
if (IS_HASWELL(dev))
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
else
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) {
@ -1253,7 +1333,7 @@ void intel_uncore_init(struct drm_device *dev)
i915_check_vgpu(dev);
intel_uncore_ellc_detect(dev);
intel_uncore_edram_detect(dev_priv);
intel_uncore_fw_domains_init(dev);
__intel_uncore_early_sanitize(dev, false);
@ -1715,3 +1795,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
enum forcewake_domains fw_domains;
if (intel_vgpu_active(dev_priv->dev))
return 0;
switch (INTEL_INFO(dev_priv)->gen) {
case 9:
fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
case 8:
if (IS_CHERRYVIEW(dev_priv))
fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
else
fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
case 7:
case 6:
if (IS_VALLEYVIEW(dev_priv))
fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg));
else
fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg));
break;
default:
MISSING_CASE(INTEL_INFO(dev_priv)->gen);
case 5: /* forcewake was introduced with gen6 */
case 4:
case 3:
case 2:
return 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
return fw_domains;
}
static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
enum forcewake_domains fw_domains;
if (intel_vgpu_active(dev_priv->dev))
return 0;
switch (INTEL_INFO(dev_priv)->gen) {
case 9:
fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;
case 8:
if (IS_CHERRYVIEW(dev_priv))
fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg));
else
fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg));
break;
case 7:
case 6:
fw_domains = FORCEWAKE_RENDER;
break;
default:
MISSING_CASE(INTEL_INFO(dev_priv)->gen);
case 5:
case 4:
case 3:
case 2:
return 0;
}
WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
return fw_domains;
}
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
* @dev_priv: pointer to struct drm_i915_private
* @reg: register in question
* @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
*
* Returns the set of forcewake domains that must be taken (with, for example,
* intel_uncore_forcewake_get) before the specified register is accessible in
* the specified mode (read, write or read/write) via the raw mmio accessors.
*
* NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
* callers to do FIFO management on their own, or they risk losing writes.
*/
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op)
{
enum forcewake_domains fw_domains = 0;
WARN_ON(!op);
if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
if (op & FW_REG_WRITE)
fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
return fw_domains;
}
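A plausible usage sketch for the new helper; the register choice, locking context and raw accessors here are assumptions based on the driver's usual pattern, not part of this patch:

enum forcewake_domains fw;
u32 val;

fw = intel_uncore_forcewake_for_reg(dev_priv, GEN6_RPNSWREQ,
				    FW_REG_READ | FW_REG_WRITE);

spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, fw);

val = I915_READ_FW(GEN6_RPNSWREQ);	/* raw accessors bypass the fw table */
I915_WRITE_FW(GEN6_RPNSWREQ, val);

intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irq(&dev_priv->uncore.lock);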