drm/i915: Convert dev_priv->dev backpointers to dev_priv->drm
Since drm_i915_private is now a subclass of drm_device we do not need to
chase the drm_i915_private->dev backpointer and can instead simply
access drm_i915_private->drm directly.
   text    data     bss     dec     hex filename
1068757    4565     416 1073738  10624a drivers/gpu/drm/i915/i915.ko
1066949    4565     416 1071930  105b3a drivers/gpu/drm/i915/i915.ko
Created by the coccinelle script:
@@
struct drm_i915_private *d;
identifier i;
@@
(
- d->dev->i
+ d->drm.i
|
- d->dev
+ &d->drm
)
and for good measure the dev_priv->dev backpointer was removed entirely.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1467711623-2905-4-git-send-email-chris@chris-wilson.co.uk
This commit is contained in:
parent
ded8b07d4c
commit
91c8a326a1
@ -440,15 +440,15 @@ static void print_context_stats(struct seq_file *m,
|
||||
|
||||
memset(&stats, 0, sizeof(stats));
|
||||
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
if (dev_priv->kernel_context)
|
||||
per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
|
||||
|
||||
list_for_each_entry(file, &dev_priv->dev->filelist, lhead) {
|
||||
list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
|
||||
struct drm_i915_file_private *fpriv = file->driver_priv;
|
||||
idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
|
||||
}
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
print_file_stats(m, "[k]contexts", stats);
|
||||
}
|
||||
@ -2797,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
|
||||
seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
|
||||
#endif
|
||||
seq_printf(m, "PCI device power state: %s [%d]\n",
|
||||
pci_power_name(dev_priv->dev->pdev->current_state),
|
||||
dev_priv->dev->pdev->current_state);
|
||||
pci_power_name(dev_priv->drm.pdev->current_state),
|
||||
dev_priv->drm.pdev->current_state);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -5098,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
|
||||
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
|
||||
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
|
||||
|
||||
@ -5483,7 +5483,7 @@ void intel_display_crc_init(struct drm_device *dev)
|
||||
|
||||
int i915_debugfs_register(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_minor *minor = dev_priv->dev->primary;
|
||||
struct drm_minor *minor = dev_priv->drm.primary;
|
||||
int ret, i;
|
||||
|
||||
ret = i915_forcewake_create(minor->debugfs_root, minor);
|
||||
@ -5511,7 +5511,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
|
||||
|
||||
void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_minor *minor = dev_priv->dev->primary;
|
||||
struct drm_minor *minor = dev_priv->drm.primary;
|
||||
int i;
|
||||
|
||||
drm_debugfs_remove_files(i915_debugfs_list,
|
||||
|
@ -687,7 +687,7 @@ out:
|
||||
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct apertures_struct *ap;
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
bool primary;
|
||||
int ret;
|
||||
@ -889,7 +889,7 @@ err_workqueues:
|
||||
*/
|
||||
static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
i915_gem_load_cleanup(dev_priv->dev);
|
||||
i915_gem_load_cleanup(&dev_priv->drm);
|
||||
i915_workqueues_cleanup(dev_priv);
|
||||
}
|
||||
|
||||
@ -944,7 +944,7 @@ static void i915_mmio_cleanup(struct drm_device *dev)
|
||||
*/
|
||||
static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int ret;
|
||||
|
||||
if (i915_inject_load_failure())
|
||||
@ -973,7 +973,7 @@ put_bridge:
|
||||
*/
|
||||
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
intel_uncore_fini(dev_priv);
|
||||
i915_mmio_cleanup(dev);
|
||||
@ -1006,7 +1006,7 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
uint32_t aperture_size;
|
||||
int ret;
|
||||
@ -1125,7 +1125,7 @@ out_ggtt:
|
||||
*/
|
||||
static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct i915_ggtt *ggtt = &dev_priv->ggtt;
|
||||
|
||||
if (dev->pdev->msi_enabled)
|
||||
@ -1146,7 +1146,7 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
static void i915_driver_register(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
i915_gem_shrinker_init(dev_priv);
|
||||
|
||||
@ -1197,9 +1197,9 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
|
||||
acpi_video_unregister();
|
||||
intel_opregion_unregister(dev_priv);
|
||||
|
||||
i915_teardown_sysfs(dev_priv->dev);
|
||||
i915_teardown_sysfs(&dev_priv->drm);
|
||||
i915_debugfs_unregister(dev_priv);
|
||||
drm_dev_unregister(dev_priv->dev);
|
||||
drm_dev_unregister(&dev_priv->drm);
|
||||
|
||||
i915_gem_shrinker_cleanup(dev_priv);
|
||||
}
|
||||
@ -1236,7 +1236,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
dev_priv->drm.pdev = pdev;
|
||||
dev_priv->drm.dev_private = dev_priv;
|
||||
dev_priv->dev = &dev_priv->drm;
|
||||
|
||||
ret = pci_enable_device(pdev);
|
||||
if (ret)
|
||||
@ -1264,13 +1263,13 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
* to the role/effect of the given init step.
|
||||
*/
|
||||
if (INTEL_INFO(dev_priv)->num_pipes) {
|
||||
ret = drm_vblank_init(dev_priv->dev,
|
||||
ret = drm_vblank_init(&dev_priv->drm,
|
||||
INTEL_INFO(dev_priv)->num_pipes);
|
||||
if (ret)
|
||||
goto out_cleanup_hw;
|
||||
}
|
||||
|
||||
ret = i915_load_modeset_init(dev_priv->dev);
|
||||
ret = i915_load_modeset_init(&dev_priv->drm);
|
||||
if (ret < 0)
|
||||
goto out_cleanup_vblank;
|
||||
|
||||
@ -1283,7 +1282,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
return 0;
|
||||
|
||||
out_cleanup_vblank:
|
||||
drm_vblank_cleanup(dev_priv->dev);
|
||||
drm_vblank_cleanup(&dev_priv->drm);
|
||||
out_cleanup_hw:
|
||||
i915_driver_cleanup_hw(dev_priv);
|
||||
out_cleanup_mmio:
|
||||
@ -1402,7 +1401,7 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
|
||||
|
||||
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
@ -1770,7 +1769,7 @@ int i915_resume_switcheroo(struct drm_device *dev)
|
||||
*/
|
||||
int i915_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct i915_gpu_error *error = &dev_priv->gpu_error;
|
||||
unsigned reset_counter;
|
||||
int ret;
|
||||
@ -1861,7 +1860,7 @@ static int i915_pm_suspend(struct device *dev)
|
||||
|
||||
static int i915_pm_suspend_late(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
|
||||
|
||||
/*
|
||||
* We have a suspend ordering issue with the snd-hda driver also
|
||||
@ -1880,7 +1879,7 @@ static int i915_pm_suspend_late(struct device *dev)
|
||||
|
||||
static int i915_pm_poweroff_late(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
|
||||
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
@ -1890,7 +1889,7 @@ static int i915_pm_poweroff_late(struct device *dev)
|
||||
|
||||
static int i915_pm_resume_early(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
|
||||
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
@ -1900,7 +1899,7 @@ static int i915_pm_resume_early(struct device *dev)
|
||||
|
||||
static int i915_pm_resume(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
|
||||
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
@ -2278,7 +2277,7 @@ err1:
|
||||
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
|
||||
bool rpm_resume)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int err;
|
||||
int ret;
|
||||
|
||||
|
@ -320,15 +320,16 @@ struct i915_hotplug {
|
||||
for_each_if ((__ports_mask) & (1 << (__port)))
|
||||
|
||||
#define for_each_crtc(dev, crtc) \
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
|
||||
|
||||
#define for_each_intel_plane(dev, intel_plane) \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&dev->mode_config.plane_list, \
|
||||
&(dev)->mode_config.plane_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
|
||||
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, \
|
||||
list_for_each_entry(intel_plane, \
|
||||
&(dev)->mode_config.plane_list, \
|
||||
base.head) \
|
||||
for_each_if ((plane_mask) & \
|
||||
(1 << drm_plane_index(&intel_plane->base)))
|
||||
@ -339,11 +340,15 @@ struct i915_hotplug {
|
||||
base.head) \
|
||||
for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
|
||||
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
|
||||
#define for_each_intel_crtc(dev, intel_crtc) \
|
||||
list_for_each_entry(intel_crtc, \
|
||||
&(dev)->mode_config.crtc_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
|
||||
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) \
|
||||
#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
|
||||
list_for_each_entry(intel_crtc, \
|
||||
&(dev)->mode_config.crtc_list, \
|
||||
base.head) \
|
||||
for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
|
||||
|
||||
#define for_each_intel_encoder(dev, intel_encoder) \
|
||||
@ -353,7 +358,7 @@ struct i915_hotplug {
|
||||
|
||||
#define for_each_intel_connector(dev, intel_connector) \
|
||||
list_for_each_entry(intel_connector, \
|
||||
&dev->mode_config.connector_list, \
|
||||
&(dev)->mode_config.connector_list, \
|
||||
base.head)
|
||||
|
||||
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
|
||||
@ -1718,7 +1723,6 @@ struct intel_wm_config {
|
||||
struct drm_i915_private {
|
||||
struct drm_device drm;
|
||||
|
||||
struct drm_device *dev;
|
||||
struct kmem_cache *objects;
|
||||
struct kmem_cache *vmas;
|
||||
struct kmem_cache *requests;
|
||||
|
@ -1651,7 +1651,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
struct drm_i915_gem_request *tmp;
|
||||
|
||||
lockdep_assert_held(&engine->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&engine->i915->drm.struct_mutex);
|
||||
|
||||
if (list_empty(&req->list))
|
||||
return;
|
||||
@ -1680,7 +1680,7 @@ i915_wait_request(struct drm_i915_gem_request *req)
|
||||
|
||||
interruptible = dev_priv->mm.interruptible;
|
||||
|
||||
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
|
||||
BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
|
||||
|
||||
ret = __i915_wait_request(req, interruptible, NULL, NULL);
|
||||
if (ret)
|
||||
@ -3254,7 +3254,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
if (dev_priv->gt.active_engines == 0)
|
||||
return;
|
||||
@ -3278,7 +3278,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), gt.retire_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
/* Come back later if the device is busy... */
|
||||
if (mutex_trylock(&dev->struct_mutex)) {
|
||||
@ -3301,7 +3301,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), gt.idle_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_engine_cs *engine;
|
||||
unsigned int stuck_engines;
|
||||
bool rearm_hangcheck;
|
||||
@ -3713,7 +3713,7 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
|
||||
struct intel_engine_cs *engine;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
if (engine->last_context == NULL)
|
||||
@ -5252,7 +5252,7 @@ init_engine_lists(struct intel_engine_cs *engine)
|
||||
void
|
||||
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
|
||||
!IS_CHERRYVIEW(dev_priv))
|
||||
|
@ -154,7 +154,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
||||
struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
trace_i915_context_free(ctx);
|
||||
|
||||
/*
|
||||
@ -465,7 +465,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_engine_cs *engine;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
for_each_engine(engine, dev_priv) {
|
||||
if (engine->last_context) {
|
||||
@ -895,7 +895,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
|
||||
struct intel_engine_cs *engine = req->engine;
|
||||
|
||||
WARN_ON(i915.enable_execlists);
|
||||
lockdep_assert_held(&req->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&req->i915->drm.struct_mutex);
|
||||
|
||||
if (!req->ctx->engine[engine->id].state) {
|
||||
struct i915_gem_context *to = req->ctx;
|
||||
|
@ -1328,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
|
||||
/* Check whether the file_priv has already selected one ring. */
|
||||
if ((int)file_priv->bsd_ring < 0) {
|
||||
/* If not, use the ping-pong mechanism to select one. */
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
|
||||
dev_priv->mm.bsd_ring_dispatch_index ^= 1;
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
return file_priv->bsd_ring;
|
||||
|
@ -153,7 +153,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
|
||||
#endif
|
||||
|
||||
/* Early VLV doesn't have this */
|
||||
if (IS_VALLEYVIEW(dev_priv) && dev_priv->dev->pdev->revision < 0xb) {
|
||||
if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
|
||||
DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
|
||||
return 0;
|
||||
}
|
||||
@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
|
||||
struct drm_i915_private *dev_priv)
|
||||
{
|
||||
drm_mm_init(&vm->mm, vm->start, vm->total);
|
||||
vm->dev = dev_priv->dev;
|
||||
vm->dev = &dev_priv->drm;
|
||||
INIT_LIST_HEAD(&vm->active_list);
|
||||
INIT_LIST_HEAD(&vm->inactive_list);
|
||||
list_add_tail(&vm->global_link, &dev_priv->vm_list);
|
||||
@ -3179,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
int ret;
|
||||
|
||||
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
|
||||
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
|
||||
if (!ret) {
|
||||
DRM_ERROR("failed to set up gmch\n");
|
||||
return -EIO;
|
||||
@ -3188,7 +3188,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
|
||||
intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
|
||||
&ggtt->mappable_base, &ggtt->mappable_end);
|
||||
|
||||
ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
|
||||
ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
|
||||
ggtt->base.insert_page = i915_ggtt_insert_page;
|
||||
ggtt->base.insert_entries = i915_ggtt_insert_entries;
|
||||
ggtt->base.clear_range = i915_ggtt_clear_range;
|
||||
|
@ -58,7 +58,7 @@ static int render_state_init(struct render_state *so,
|
||||
if (so->rodata->batch_items * 4 > 4096)
|
||||
return -EINVAL;
|
||||
|
||||
so->obj = i915_gem_object_create(dev_priv->dev, 4096);
|
||||
so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
|
||||
if (IS_ERR(so->obj))
|
||||
return PTR_ERR(so->obj);
|
||||
|
||||
|
@ -257,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(shrinker, struct drm_i915_private, mm.shrinker);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_i915_gem_object *obj;
|
||||
unsigned long count;
|
||||
bool unlock;
|
||||
@ -288,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(shrinker, struct drm_i915_private, mm.shrinker);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
unsigned long freed;
|
||||
bool unlock;
|
||||
|
||||
@ -323,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
|
||||
|
||||
while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) {
|
||||
while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
|
||||
schedule_timeout_killable(1);
|
||||
if (fatal_signal_pending(current))
|
||||
return false;
|
||||
@ -344,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
dev_priv->mm.interruptible = slu->was_interruptible;
|
||||
if (slu->unlock)
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -1276,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
|
||||
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
|
||||
struct drm_i915_error_state *error)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i;
|
||||
|
||||
/* General organization
|
||||
@ -1446,7 +1446,8 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
|
||||
DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
|
||||
DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
|
||||
DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
|
||||
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev_priv->dev->primary->index);
|
||||
DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
|
||||
dev_priv->drm.primary->index);
|
||||
warned = true;
|
||||
}
|
||||
}
|
||||
|
@ -622,7 +622,7 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
|
||||
{
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
obj = i915_gem_object_create(dev_priv->dev, size);
|
||||
obj = i915_gem_object_create(&dev_priv->drm, size);
|
||||
if (IS_ERR(obj))
|
||||
return NULL;
|
||||
|
||||
|
@ -378,7 +378,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
|
||||
~dev_priv->pm_rps_events);
|
||||
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
synchronize_irq(dev_priv->dev->irq);
|
||||
synchronize_irq(dev_priv->drm.irq);
|
||||
|
||||
/* Now that we will not be generating any more work, flush any
|
||||
* outsanding tasks. As we are called on the RPS idle path,
|
||||
@ -566,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
u32 enable_mask;
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
||||
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
|
||||
enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
|
||||
status_mask);
|
||||
else
|
||||
enable_mask = status_mask << 16;
|
||||
@ -580,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
u32 enable_mask;
|
||||
|
||||
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
|
||||
enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
|
||||
enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
|
||||
status_mask);
|
||||
else
|
||||
enable_mask = status_mask << 16;
|
||||
@ -1175,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
|
||||
* In order to prevent a get/put style interface, acquire struct mutex
|
||||
* any time we access those registers.
|
||||
*/
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
/* If we've screwed up tracking, just let the interrupt fire again */
|
||||
if (WARN_ON(!dev_priv->l3_parity.which_slice))
|
||||
@ -1211,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
|
||||
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
|
||||
parity_event[5] = NULL;
|
||||
|
||||
kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
|
||||
kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
|
||||
KOBJ_CHANGE, parity_event);
|
||||
|
||||
DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
|
||||
@ -1231,7 +1231,7 @@ out:
|
||||
gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
|
||||
@ -1513,7 +1513,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
|
||||
|
||||
entry = &pipe_crc->entries[head];
|
||||
|
||||
entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
|
||||
entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
|
||||
pipe);
|
||||
entry->crc[0] = crc0;
|
||||
entry->crc[1] = crc1;
|
||||
@ -1611,7 +1611,7 @@ static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
bool ret;
|
||||
|
||||
ret = drm_handle_vblank(dev_priv->dev, pipe);
|
||||
ret = drm_handle_vblank(&dev_priv->drm, pipe);
|
||||
if (ret)
|
||||
intel_finish_page_flip_mmio(dev_priv, pipe);
|
||||
|
||||
@ -2500,7 +2500,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
|
||||
struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
|
||||
char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
|
||||
char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
|
||||
char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
|
||||
@ -3402,7 +3402,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
/* make sure we're done processing display irqs */
|
||||
synchronize_irq(dev_priv->dev->irq);
|
||||
synchronize_irq(dev_priv->drm.irq);
|
||||
}
|
||||
|
||||
static void cherryview_irq_preinstall(struct drm_device *dev)
|
||||
@ -3428,7 +3428,7 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
|
||||
struct intel_encoder *encoder;
|
||||
u32 enabled_irqs = 0;
|
||||
|
||||
for_each_intel_encoder(dev_priv->dev, encoder)
|
||||
for_each_intel_encoder(&dev_priv->drm, encoder)
|
||||
if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
|
||||
enabled_irqs |= hpd[encoder->hpd_pin];
|
||||
|
||||
@ -4510,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
|
||||
*/
|
||||
void intel_irq_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
intel_hpd_init_work(dev_priv);
|
||||
|
||||
@ -4644,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
|
||||
return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
|
||||
return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -4656,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
drm_irq_uninstall(dev_priv->dev);
|
||||
drm_irq_uninstall(&dev_priv->drm);
|
||||
intel_hpd_cancel_work(dev_priv);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
}
|
||||
@ -4670,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
|
||||
dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
|
||||
dev_priv->pm.irqs_enabled = false;
|
||||
synchronize_irq(dev_priv->dev->irq);
|
||||
synchronize_irq(dev_priv->drm.irq);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -4685,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
|
||||
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
dev_priv->pm.irqs_enabled = true;
|
||||
dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
|
||||
dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
|
||||
dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
|
||||
dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = i915->dev->primary->index;
|
||||
__entry->dev = i915->drm.primary->index;
|
||||
__entry->target = target;
|
||||
__entry->flags = flags;
|
||||
),
|
||||
@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = from->i915->dev->primary->index;
|
||||
__entry->dev = from->i915->drm.primary->index;
|
||||
__entry->sync_from = from->id;
|
||||
__entry->sync_to = to_req->engine->id;
|
||||
__entry->seqno = i915_gem_request_get_seqno(req);
|
||||
@ -486,7 +486,7 @@ TRACE_EVENT(i915_gem_ring_dispatch,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
__entry->flags = flags;
|
||||
@ -509,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->invalidate = invalidate;
|
||||
__entry->flush = flush;
|
||||
@ -531,7 +531,7 @@ DECLARE_EVENT_CLASS(i915_gem_request,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
),
|
||||
@ -556,7 +556,7 @@ TRACE_EVENT(i915_gem_request_notify,
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->dev = engine->i915->dev->primary->index;
|
||||
__entry->dev = engine->i915->drm.primary->index;
|
||||
__entry->ring = engine->id;
|
||||
__entry->seqno = intel_engine_get_seqno(engine);
|
||||
),
|
||||
@ -593,11 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
|
||||
* less desirable.
|
||||
*/
|
||||
TP_fast_assign(
|
||||
__entry->dev = req->i915->dev->primary->index;
|
||||
__entry->dev = req->i915->drm.primary->index;
|
||||
__entry->ring = req->engine->id;
|
||||
__entry->seqno = req->seqno;
|
||||
__entry->blocking =
|
||||
mutex_is_locked(&req->i915->dev->struct_mutex);
|
||||
mutex_is_locked(&req->i915->drm.struct_mutex);
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
|
||||
@ -746,7 +746,7 @@ DECLARE_EVENT_CLASS(i915_context,
|
||||
TP_fast_assign(
|
||||
__entry->ctx = ctx;
|
||||
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
|
||||
__entry->dev = ctx->i915->dev->primary->index;
|
||||
__entry->dev = ctx->i915->drm.primary->index;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
|
||||
@ -786,7 +786,7 @@ TRACE_EVENT(switch_mm,
|
||||
__entry->ring = engine->id;
|
||||
__entry->to = to;
|
||||
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
|
||||
__entry->dev = engine->i915->dev->primary->index;
|
||||
__entry->dev = engine->i915->drm.primary->index;
|
||||
),
|
||||
|
||||
TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
|
||||
|
@ -749,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
|
||||
if (WARN_ON(acomp->ops || acomp->dev))
|
||||
return -EEXIST;
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
acomp->ops = &i915_audio_component_ops;
|
||||
acomp->dev = i915_dev;
|
||||
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
|
||||
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
|
||||
acomp->aud_sample_rate[i] = 0;
|
||||
dev_priv->audio_component = acomp;
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -767,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
|
||||
struct i915_audio_component *acomp = data;
|
||||
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
acomp->ops = NULL;
|
||||
acomp->dev = NULL;
|
||||
dev_priv->audio_component = NULL;
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static const struct component_ops i915_audio_component_bind_ops = {
|
||||
@ -799,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops);
|
||||
ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
|
||||
if (ret < 0) {
|
||||
DRM_ERROR("failed to add audio component (%d)\n", ret);
|
||||
/* continue with reduced functionality */
|
||||
@ -821,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
|
||||
if (!dev_priv->audio_component_registered)
|
||||
return;
|
||||
|
||||
component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops);
|
||||
component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
|
||||
dev_priv->audio_component_registered = false;
|
||||
}
|
||||
|
@ -1426,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
|
||||
int
|
||||
intel_bios_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
const struct vbt_header *vbt = dev_priv->opregion.vbt;
|
||||
const struct bdb_header *bdb;
|
||||
u8 __iomem *bios = NULL;
|
||||
|
@ -412,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
||||
csr = &dev_priv->csr;
|
||||
|
||||
ret = request_firmware(&fw, dev_priv->csr.fw_path,
|
||||
&dev_priv->dev->pdev->dev);
|
||||
&dev_priv->drm.pdev->dev);
|
||||
if (fw)
|
||||
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
|
||||
|
||||
@ -426,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
|
||||
CSR_VERSION_MAJOR(csr->version),
|
||||
CSR_VERSION_MINOR(csr->version));
|
||||
} else {
|
||||
dev_notice(dev_priv->dev->dev,
|
||||
dev_notice(dev_priv->drm.dev,
|
||||
"Failed to load DMC firmware"
|
||||
" [" FIRMWARE_URL "],"
|
||||
" disabling runtime power management.\n");
|
||||
|
@ -1235,7 +1235,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
|
||||
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
i915_reg_t pp_reg;
|
||||
u32 val;
|
||||
enum pipe panel_pipe = PIPE_A;
|
||||
@ -1277,7 +1277,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
|
||||
static void assert_cursor(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe, bool state)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
bool cur_state;
|
||||
|
||||
if (IS_845G(dev) || IS_I865G(dev))
|
||||
@ -1339,7 +1339,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
|
||||
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int i;
|
||||
|
||||
/* Primary planes are fixed to pipes on gen4+ */
|
||||
@ -1365,7 +1365,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
|
||||
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int sprite;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 9) {
|
||||
@ -1830,7 +1830,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
|
||||
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
i915_reg_t reg;
|
||||
@ -1921,7 +1921,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
|
||||
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
i915_reg_t reg;
|
||||
uint32_t val;
|
||||
|
||||
@ -3137,7 +3137,7 @@ static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
for_each_intel_crtc(dev_priv->dev, crtc)
|
||||
for_each_intel_crtc(&dev_priv->drm, crtc)
|
||||
intel_finish_page_flip_cs(dev_priv, crtc->pipe);
|
||||
}
|
||||
|
||||
@ -3171,12 +3171,12 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
|
||||
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
|
||||
return;
|
||||
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
drm_modeset_lock_all(&dev_priv->drm);
|
||||
/*
|
||||
* Disabling the crtcs gracefully seems nicer. Also the
|
||||
* g33 docs say we should at least disable all the planes.
|
||||
*/
|
||||
intel_display_suspend(dev_priv->dev);
|
||||
intel_display_suspend(&dev_priv->drm);
|
||||
}
|
||||
|
||||
void intel_finish_reset(struct drm_i915_private *dev_priv)
|
||||
@ -3203,7 +3203,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
|
||||
* FIXME: Atomic will make this obsolete since we won't schedule
|
||||
* CS-based flips (which might get lost in gpu resets) any more.
|
||||
*/
|
||||
intel_update_primary_planes(dev_priv->dev);
|
||||
intel_update_primary_planes(&dev_priv->drm);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -3214,18 +3214,18 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
|
||||
intel_runtime_pm_disable_interrupts(dev_priv);
|
||||
intel_runtime_pm_enable_interrupts(dev_priv);
|
||||
|
||||
intel_modeset_init_hw(dev_priv->dev);
|
||||
intel_modeset_init_hw(&dev_priv->drm);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
if (dev_priv->display.hpd_irq_setup)
|
||||
dev_priv->display.hpd_irq_setup(dev_priv);
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
intel_display_resume(dev_priv->dev);
|
||||
intel_display_resume(&dev_priv->drm);
|
||||
|
||||
intel_hpd_init(dev_priv);
|
||||
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
drm_modeset_unlock_all(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
|
||||
@ -5524,14 +5524,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
|
||||
return;
|
||||
}
|
||||
|
||||
intel_update_cdclk(dev_priv->dev);
|
||||
intel_update_cdclk(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 cdctl, expected;
|
||||
|
||||
intel_update_cdclk(dev_priv->dev);
|
||||
intel_update_cdclk(&dev_priv->drm);
|
||||
|
||||
if (dev_priv->cdclk_pll.vco == 0 ||
|
||||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
|
||||
@ -5664,7 +5664,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
|
||||
dev_priv->skl_preferred_vco_freq = vco;
|
||||
|
||||
if (changed)
|
||||
intel_update_max_cdclk(dev_priv->dev);
|
||||
intel_update_max_cdclk(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -5758,7 +5758,7 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
|
||||
|
||||
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
u32 freq_select, pcu_ack;
|
||||
|
||||
WARN_ON((cdclk == 24000) != (vco == 0));
|
||||
@ -5856,7 +5856,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
|
||||
if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
|
||||
goto sanitize;
|
||||
|
||||
intel_update_cdclk(dev_priv->dev);
|
||||
intel_update_cdclk(&dev_priv->drm);
|
||||
/* Is PLL enabled and locked ? */
|
||||
if (dev_priv->cdclk_pll.vco == 0 ||
|
||||
dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
|
||||
@ -9488,7 +9488,7 @@ out:
|
||||
|
||||
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
for_each_intel_crtc(dev, crtc)
|
||||
@ -9522,7 +9522,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
|
||||
|
||||
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
if (IS_HASWELL(dev))
|
||||
return I915_READ(D_COMP_HSW);
|
||||
@ -9532,7 +9532,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
|
||||
|
||||
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
if (IS_HASWELL(dev)) {
|
||||
mutex_lock(&dev_priv->rps.hw_lock);
|
||||
@ -9649,7 +9649,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
|
||||
}
|
||||
|
||||
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||
intel_update_cdclk(dev_priv->dev);
|
||||
intel_update_cdclk(&dev_priv->drm);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -9677,7 +9677,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
uint32_t val;
|
||||
|
||||
DRM_DEBUG_KMS("Enabling package C8+\n");
|
||||
@ -9694,7 +9694,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
|
||||
|
||||
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
uint32_t val;
|
||||
|
||||
DRM_DEBUG_KMS("Disabling package C8+\n");
|
||||
@ -11142,7 +11142,7 @@ static bool pageflip_finished(struct intel_crtc *crtc,
|
||||
|
||||
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_flip_work *work;
|
||||
@ -11169,7 +11169,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
|
||||
|
||||
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_flip_work *work;
|
||||
@ -11628,7 +11628,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
|
||||
|
||||
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
struct intel_flip_work *work;
|
||||
|
@ -540,7 +540,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
|
||||
|
||||
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
|
||||
@ -2286,7 +2286,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
|
||||
* 2. Program DP PLL enable
|
||||
*/
|
||||
if (IS_GEN5(dev_priv))
|
||||
intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
|
||||
intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
|
||||
|
||||
intel_dp->DP |= DP_PLL_ENABLE;
|
||||
|
||||
@ -3387,7 +3387,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
|
||||
I915_WRITE(intel_dp->output_reg, DP);
|
||||
POSTING_READ(intel_dp->output_reg);
|
||||
|
||||
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
|
||||
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
@ -5147,9 +5147,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
|
||||
}
|
||||
|
||||
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
intel_dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
intel_dp_set_drrs_state(&dev_priv->drm,
|
||||
intel_dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
|
||||
dev_priv->drrs.dp = NULL;
|
||||
mutex_unlock(&dev_priv->drrs.mutex);
|
||||
@ -5179,9 +5179,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
|
||||
goto unlock;
|
||||
|
||||
if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
intel_dp->attached_connector->panel.
|
||||
downclock_mode->vrefresh);
|
||||
intel_dp_set_drrs_state(&dev_priv->drm,
|
||||
intel_dp->attached_connector->panel.
|
||||
downclock_mode->vrefresh);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&dev_priv->drrs.mutex);
|
||||
@ -5223,9 +5223,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
|
||||
|
||||
/* invalidate means busy screen hence upclock */
|
||||
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
intel_dp_set_drrs_state(&dev_priv->drm,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
|
||||
mutex_unlock(&dev_priv->drrs.mutex);
|
||||
}
|
||||
@ -5268,9 +5268,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
|
||||
|
||||
/* flush means busy screen hence upclock */
|
||||
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
|
||||
intel_dp_set_drrs_state(dev_priv->dev,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
intel_dp_set_drrs_state(&dev_priv->drm,
|
||||
dev_priv->drrs.dp->attached_connector->panel.
|
||||
fixed_mode->vrefresh);
|
||||
|
||||
/*
|
||||
* flush also means no more activity hence schedule downclock, if all
|
||||
|
@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
|
||||
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll *pll)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
/* Make sure no transcoder isn't still depending on us. */
|
||||
|
@ -392,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
|
||||
struct intel_fbc *fbc = &dev_priv->fbc;
|
||||
struct intel_fbc_work *work = &fbc->work;
|
||||
struct intel_crtc *crtc = fbc->crtc;
|
||||
struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
|
||||
struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
|
||||
|
||||
if (drm_crtc_vblank_get(&crtc->base)) {
|
||||
DRM_ERROR("vblank not available for FBC on pipe %c\n",
|
||||
@ -1210,7 +1210,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
|
||||
if (!no_fbc_on_multiple_pipes(dev_priv))
|
||||
return;
|
||||
|
||||
for_each_intel_crtc(dev_priv->dev, crtc)
|
||||
for_each_intel_crtc(&dev_priv->drm, crtc)
|
||||
if (intel_crtc_active(&crtc->base) &&
|
||||
to_intel_plane_state(crtc->base.primary->state)->visible)
|
||||
dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
|
||||
|
@ -693,9 +693,9 @@ out:
|
||||
|
||||
static void intel_fbdev_suspend_worker(struct work_struct *work)
|
||||
{
|
||||
intel_fbdev_set_suspend(container_of(work,
|
||||
struct drm_i915_private,
|
||||
fbdev_suspend_work)->dev,
|
||||
intel_fbdev_set_suspend(&container_of(work,
|
||||
struct drm_i915_private,
|
||||
fbdev_suspend_work)->drm,
|
||||
FBINFO_STATE_RUNNING,
|
||||
true);
|
||||
}
|
||||
|
@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
bool ret;
|
||||
|
||||
spin_lock_irqsave(&dev_priv->irq_lock, flags);
|
||||
ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
|
||||
ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
|
||||
enable);
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
|
||||
@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
intel_crtc->pch_fifo_underrun_disabled = !enable;
|
||||
|
||||
if (HAS_PCH_IBX(dev_priv))
|
||||
ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
|
||||
ibx_set_fifo_underrun_reporting(&dev_priv->drm,
|
||||
pch_transcoder,
|
||||
enable);
|
||||
else
|
||||
cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
|
||||
cpt_set_fifo_underrun_reporting(&dev_priv->drm,
|
||||
pch_transcoder,
|
||||
enable, old);
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
|
||||
@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for_each_intel_crtc(dev_priv->dev, crtc) {
|
||||
for_each_intel_crtc(&dev_priv->drm, crtc) {
|
||||
if (crtc->cpu_fifo_underrun_disabled)
|
||||
continue;
|
||||
|
||||
@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
|
||||
for_each_intel_crtc(dev_priv->dev, crtc) {
|
||||
for_each_intel_crtc(&dev_priv->drm, crtc) {
|
||||
if (crtc->pch_fifo_underrun_disabled)
|
||||
continue;
|
||||
|
||||
|
@ -314,7 +314,7 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
|
||||
static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
|
||||
|
@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
|
||||
I915_WRITE(intel_hdmi->hdmi_reg, temp);
|
||||
POSTING_READ(intel_hdmi->hdmi_reg);
|
||||
|
||||
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
|
||||
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
|
@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
|
||||
|
||||
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv),
|
||||
hotplug.reenable_work.work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
int i;
|
||||
|
||||
@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct intel_connector *intel_connector;
|
||||
struct intel_encoder *intel_encoder;
|
||||
@ -455,7 +455,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
void intel_hpd_init(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct drm_mode_config *mode_config = &dev->mode_config;
|
||||
struct drm_connector *connector;
|
||||
int i;
|
||||
|
@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
|
||||
static u32 get_reserved(struct intel_gmbus *bus)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = bus->dev_priv;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
u32 reserved = 0;
|
||||
|
||||
/* On most chips, these bits must be preserved in software. */
|
||||
@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
|
||||
adapter);
|
||||
struct drm_i915_private *dev_priv = bus->dev_priv;
|
||||
|
||||
intel_i2c_reset(dev_priv->dev);
|
||||
intel_i2c_reset(&dev_priv->drm);
|
||||
intel_i2c_quirk_set(dev_priv, true);
|
||||
set_data(bus, 1);
|
||||
set_clock(bus, 1);
|
||||
@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
|
||||
goto err;
|
||||
}
|
||||
|
||||
intel_i2c_reset(dev_priv->dev);
|
||||
intel_i2c_reset(&dev_priv->drm);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -899,7 +899,7 @@ void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
|
||||
struct drm_i915_gem_request *req, *tmp;
|
||||
LIST_HEAD(cancel_list);
|
||||
|
||||
WARN_ON(!mutex_is_locked(&engine->i915->dev->struct_mutex));
|
||||
WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
|
||||
|
||||
spin_lock_bh(&engine->execlist_lock);
|
||||
list_replace_init(&engine->execlist_queue, &cancel_list);
|
||||
@ -961,7 +961,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
|
||||
u32 *lrc_reg_state;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
|
||||
if (ce->pin_count++)
|
||||
return 0;
|
||||
@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
|
||||
{
|
||||
struct intel_context *ce = &ctx->engine[engine->id];
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
GEM_BUG_ON(ce->pin_count == 0);
|
||||
|
||||
if (--ce->pin_count)
|
||||
@ -1353,8 +1353,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
|
||||
{
|
||||
int ret;
|
||||
|
||||
engine->wa_ctx.obj = i915_gem_object_create(engine->i915->dev,
|
||||
PAGE_ALIGN(size));
|
||||
engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
|
||||
PAGE_ALIGN(size));
|
||||
if (IS_ERR(engine->wa_ctx.obj)) {
|
||||
DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
|
||||
ret = PTR_ERR(engine->wa_ctx.obj);
|
||||
@ -2154,7 +2154,7 @@ logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
|
||||
logical_ring_default_irqs(engine, info->irq_shift);
|
||||
|
||||
intel_engine_init_hangcheck(engine);
|
||||
i915_gem_batch_pool_init(dev_priv->dev, &engine->batch_pool);
|
||||
i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
|
||||
|
||||
return engine;
|
||||
}
|
||||
@ -2486,7 +2486,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
|
||||
/* One extra page as the sharing data between driver and GuC */
|
||||
context_size += PAGE_SIZE * LRC_PPHWSP_PN;
|
||||
|
||||
ctx_obj = i915_gem_object_create(ctx->i915->dev, context_size);
|
||||
ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
|
||||
if (IS_ERR(ctx_obj)) {
|
||||
DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
|
||||
return PTR_ERR(ctx_obj);
|
||||
|
@ -261,7 +261,7 @@ static int swsci(struct drm_i915_private *dev_priv,
|
||||
u32 function, u32 parm, u32 *parm_out)
|
||||
{
|
||||
struct opregion_swsci *swsci = dev_priv->opregion.swsci;
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
u32 main_function, sub_function, scic;
|
||||
u16 swsci_val;
|
||||
u32 dslp;
|
||||
@ -435,7 +435,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
|
||||
{
|
||||
struct intel_connector *connector;
|
||||
struct opregion_asle *asle = dev_priv->opregion.asle;
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
|
||||
|
||||
@ -718,7 +718,7 @@ static u32 acpi_display_type(struct drm_connector *connector)
|
||||
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
struct drm_connector *connector;
|
||||
acpi_handle handle;
|
||||
struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
|
||||
@ -782,7 +782,8 @@ end:
|
||||
|
||||
blind_set:
|
||||
i = 0;
|
||||
list_for_each_entry(connector, &dev_priv->dev->mode_config.connector_list, head) {
|
||||
list_for_each_entry(connector,
|
||||
&dev_priv->drm.mode_config.connector_list, head) {
|
||||
int display_type = acpi_display_type(connector);
|
||||
|
||||
if (i >= max_outputs) {
|
||||
@ -954,7 +955,7 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
|
||||
int intel_opregion_setup(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_opregion *opregion = &dev_priv->opregion;
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
u32 asls, mboxes;
|
||||
char buf[sizeof(OPREGION_SIGNATURE)];
|
||||
int err = 0;
|
||||
|
@ -409,7 +409,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
|
||||
struct intel_engine_cs *engine = &dev_priv->engine[RCS];
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
|
||||
/* Only wait if there is actually an old frame to release to
|
||||
* guarantee forward progress.
|
||||
@ -741,8 +741,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
||||
u32 swidth, swidthsw, sheight, ostride;
|
||||
enum pipe pipe = overlay->crtc->pipe;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
|
||||
|
||||
ret = intel_overlay_release_old_vid(overlay);
|
||||
if (ret != 0)
|
||||
@ -836,7 +836,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
|
||||
overlay->old_vid_bo = overlay->vid_bo;
|
||||
overlay->vid_bo = new_bo;
|
||||
|
||||
intel_frontbuffer_flip(dev_priv->dev, INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
intel_frontbuffer_flip(&dev_priv->drm,
|
||||
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
|
||||
return 0;
|
||||
|
||||
@ -851,8 +852,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
|
||||
struct overlay_registers __iomem *regs;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dev_priv->dev->struct_mutex);
|
||||
WARN_ON(!drm_modeset_is_locked(&dev_priv->dev->mode_config.connection_mutex));
|
||||
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
|
||||
|
||||
ret = intel_overlay_recover_from_interrupt(overlay);
|
||||
if (ret != 0)
|
||||
@ -1379,7 +1380,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
|
||||
if (!overlay)
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
if (WARN_ON(dev_priv->overlay))
|
||||
goto out_free;
|
||||
|
||||
@ -1387,9 +1388,10 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
|
||||
|
||||
reg_bo = NULL;
|
||||
if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
|
||||
reg_bo = i915_gem_object_create_stolen(dev_priv->dev, PAGE_SIZE);
|
||||
reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
|
||||
PAGE_SIZE);
|
||||
if (reg_bo == NULL)
|
||||
reg_bo = i915_gem_object_create(dev_priv->dev, PAGE_SIZE);
|
||||
reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
|
||||
if (IS_ERR(reg_bo))
|
||||
goto out_free;
|
||||
overlay->reg_bo = reg_bo;
|
||||
@ -1434,7 +1436,7 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
|
||||
intel_overlay_unmap_regs(overlay, regs);
|
||||
|
||||
dev_priv->overlay = overlay;
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
DRM_INFO("initialized overlay support\n");
|
||||
return;
|
||||
|
||||
@ -1444,7 +1446,7 @@ out_unpin_bo:
|
||||
out_free_bo:
|
||||
drm_gem_object_unreference(®_bo->base);
|
||||
out_free:
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
kfree(overlay);
|
||||
return;
|
||||
}
|
||||
|
@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
|
||||
if (panel->backlight.combination_mode) {
|
||||
u8 lbpc;
|
||||
|
||||
pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc);
|
||||
pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
|
||||
val *= lbpc;
|
||||
}
|
||||
|
||||
@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
|
||||
|
||||
lbpc = level * 0xfe / panel->backlight.max + 1;
|
||||
level /= lbpc;
|
||||
pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc);
|
||||
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
|
||||
}
|
||||
|
||||
if (IS_GEN4(dev_priv)) {
|
||||
@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
|
||||
* backlight. This will leave the backlight on unnecessarily when
|
||||
* another client is not activated.
|
||||
*/
|
||||
if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
|
||||
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
|
||||
DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
|
||||
return;
|
||||
}
|
||||
|
@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
|
||||
|
||||
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
u32 val;
|
||||
|
||||
if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
|
||||
@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
|
||||
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
|
||||
uint16_t wm[5], uint16_t min)
|
||||
{
|
||||
int level, max_level = ilk_wm_max_level(dev_priv->dev);
|
||||
int level, max_level = ilk_wm_max_level(&dev_priv->drm);
|
||||
|
||||
if (wm[0] >= min)
|
||||
return false;
|
||||
@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
|
||||
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
|
||||
struct ilk_wm_values *results)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct ilk_wm_values *previous = &dev_priv->wm.hw;
|
||||
unsigned int dirty;
|
||||
uint32_t val;
|
||||
@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
|
||||
int level,
|
||||
struct skl_wm_level *result)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_atomic_state *state = cstate->base.state;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct drm_plane *plane;
|
||||
@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
memset(result, 0, sizeof(*result));
|
||||
|
||||
for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
|
||||
for_each_intel_plane_mask(&dev_priv->drm,
|
||||
intel_plane,
|
||||
cstate->base.plane_mask) {
|
||||
int i = skl_wm_plane_id(intel_plane);
|
||||
|
||||
plane = &intel_plane->base;
|
||||
@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
|
||||
static void skl_write_wm_values(struct drm_i915_private *dev_priv,
|
||||
const struct skl_wm_values *new)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_crtc *crtc;
|
||||
|
||||
for_each_intel_crtc(dev, crtc) {
|
||||
@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
|
||||
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
|
||||
struct skl_wm_values *new_values)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct skl_ddb_allocation *cur_ddb, *new_ddb;
|
||||
bool reallocated[I915_MAX_PIPES] = {};
|
||||
struct intel_crtc *crtc;
|
||||
@ -4056,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
|
||||
|
||||
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
|
||||
struct ilk_wm_maximums max;
|
||||
struct intel_wm_config config = {};
|
||||
@ -5699,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
|
||||
u32 pcbr;
|
||||
int pctx_size = 24*1024;
|
||||
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
|
||||
pcbr = I915_READ(VLV_PCBR);
|
||||
if (pcbr) {
|
||||
@ -5707,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
|
||||
int pcbr_offset;
|
||||
|
||||
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
|
||||
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
|
||||
pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
|
||||
pcbr_offset,
|
||||
I915_GTT_OFFSET_NONE,
|
||||
pctx_size);
|
||||
@ -5724,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
|
||||
* overlap with other ranges, such as the frame buffer, protected
|
||||
* memory, or any other relevant ranges.
|
||||
*/
|
||||
pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
|
||||
pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
|
||||
if (!pctx) {
|
||||
DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
|
||||
goto out;
|
||||
@ -5736,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
|
||||
out:
|
||||
DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
|
||||
dev_priv->vlv_pctx = pctx;
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
}
|
||||
|
||||
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
|
||||
@ -6680,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
|
||||
|
||||
if (IS_IRONLAKE_M(dev_priv)) {
|
||||
ironlake_enable_drps(dev_priv);
|
||||
mutex_lock(&dev_priv->dev->struct_mutex);
|
||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||
intel_init_emon(dev_priv);
|
||||
mutex_unlock(&dev_priv->dev->struct_mutex);
|
||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||
} else if (INTEL_INFO(dev_priv)->gen >= 6) {
|
||||
/*
|
||||
* PCU communication is slow and this doesn't need to be
|
||||
|
@ -657,9 +657,9 @@ int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
|
||||
|
||||
WARN_ON(engine->scratch.obj);
|
||||
|
||||
obj = i915_gem_object_create_stolen(engine->i915->dev, size);
|
||||
obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
|
||||
if (!obj)
|
||||
obj = i915_gem_object_create(engine->i915->dev, size);
|
||||
obj = i915_gem_object_create(&engine->i915->drm, size);
|
||||
if (IS_ERR(obj)) {
|
||||
DRM_ERROR("Failed to allocate scratch page\n");
|
||||
ret = PTR_ERR(obj);
|
||||
@ -1888,7 +1888,7 @@ static void cleanup_phys_status_page(struct intel_engine_cs *engine)
|
||||
if (!dev_priv->status_page_dmah)
|
||||
return;
|
||||
|
||||
drm_pci_free(dev_priv->dev, dev_priv->status_page_dmah);
|
||||
drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
|
||||
engine->status_page.page_addr = NULL;
|
||||
}
|
||||
|
||||
@ -1914,7 +1914,7 @@ static int init_status_page(struct intel_engine_cs *engine)
|
||||
unsigned flags;
|
||||
int ret;
|
||||
|
||||
obj = i915_gem_object_create(engine->i915->dev, 4096);
|
||||
obj = i915_gem_object_create(&engine->i915->drm, 4096);
|
||||
if (IS_ERR(obj)) {
|
||||
DRM_ERROR("Failed to allocate status page\n");
|
||||
return PTR_ERR(obj);
|
||||
@ -1963,7 +1963,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
|
||||
|
||||
if (!dev_priv->status_page_dmah) {
|
||||
dev_priv->status_page_dmah =
|
||||
drm_pci_alloc(dev_priv->dev, PAGE_SIZE, PAGE_SIZE);
|
||||
drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
|
||||
if (!dev_priv->status_page_dmah)
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -2096,7 +2096,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
|
||||
ring->last_retired_head = -1;
|
||||
intel_ring_update_space(ring);
|
||||
|
||||
ret = intel_alloc_ringbuffer_obj(engine->i915->dev, ring);
|
||||
ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
|
||||
if (ret) {
|
||||
DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
|
||||
engine->name, ret);
|
||||
@ -2122,7 +2122,7 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
|
||||
struct intel_context *ce = &ctx->engine[engine->id];
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
|
||||
if (ce->pin_count++)
|
||||
return 0;
|
||||
@ -2156,7 +2156,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
|
||||
{
|
||||
struct intel_context *ce = &ctx->engine[engine->id];
|
||||
|
||||
lockdep_assert_held(&ctx->i915->dev->struct_mutex);
|
||||
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
|
||||
|
||||
if (--ce->pin_count)
|
||||
return;
|
||||
@ -2696,7 +2696,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
|
||||
return;
|
||||
|
||||
if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
|
||||
obj = i915_gem_object_create(dev_priv->dev, 4096);
|
||||
obj = i915_gem_object_create(&dev_priv->drm, 4096);
|
||||
if (IS_ERR(obj)) {
|
||||
DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
|
||||
i915.semaphores = 0;
|
||||
|
@ -287,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
/*
|
||||
* After we re-enable the power well, if we touch VGA register 0x3d5
|
||||
@ -318,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
|
||||
static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
|
||||
/*
|
||||
* After we re-enable the power well, if we touch VGA register 0x3d5
|
||||
@ -930,7 +930,7 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
|
||||
|
||||
WARN_ON(dev_priv->cdclk_freq !=
|
||||
dev_priv->display.get_display_clock_speed(dev_priv->dev));
|
||||
dev_priv->display.get_display_clock_speed(&dev_priv->drm));
|
||||
|
||||
gen9_assert_dbuf_enabled(dev_priv);
|
||||
|
||||
@ -1088,7 +1088,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
|
||||
*
|
||||
* CHV DPLL B/C have some issues if VGA mode is enabled.
|
||||
*/
|
||||
for_each_pipe(dev_priv->dev, pipe) {
|
||||
for_each_pipe(&dev_priv->drm, pipe) {
|
||||
u32 val = I915_READ(DPLL(pipe));
|
||||
|
||||
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
|
||||
@ -1113,7 +1113,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
|
||||
|
||||
intel_hpd_init(dev_priv);
|
||||
|
||||
i915_redisable_vga_power_on(dev_priv->dev);
|
||||
i915_redisable_vga_power_on(&dev_priv->drm);
|
||||
}
|
||||
|
||||
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
|
||||
@ -1123,7 +1123,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
|
||||
spin_unlock_irq(&dev_priv->irq_lock);
|
||||
|
||||
/* make sure we're done processing display irqs */
|
||||
synchronize_irq(dev_priv->dev->irq);
|
||||
synchronize_irq(dev_priv->drm.irq);
|
||||
|
||||
intel_power_sequencer_reset(dev_priv);
|
||||
}
|
||||
@ -2275,7 +2275,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_power_domains_fini(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct device *device = &dev_priv->dev->pdev->dev;
|
||||
struct device *device = &dev_priv->drm.pdev->dev;
|
||||
|
||||
/*
|
||||
* The i915.ko module is still not prepared to be loaded when
|
||||
@ -2576,7 +2576,7 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct i915_power_domains *power_domains = &dev_priv->power_domains;
|
||||
|
||||
power_domains->initializing = true;
|
||||
@ -2638,7 +2638,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
pm_runtime_get_sync(device);
|
||||
@ -2659,7 +2659,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PM)) {
|
||||
@ -2701,7 +2701,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
@ -2720,7 +2720,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
assert_rpm_wakelock_held(dev_priv);
|
||||
@ -2743,7 +2743,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct drm_device *dev = &dev_priv->drm;
|
||||
struct device *device = &dev->pdev->dev;
|
||||
|
||||
pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
|
||||
|
@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
|
||||
temp &= ~SDVO_ENABLE;
|
||||
intel_sdvo_write_sdvox(intel_sdvo, temp);
|
||||
|
||||
intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
|
||||
intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
|
||||
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
|
||||
}
|
||||
|
@ -1471,7 +1471,7 @@ static int i915_reset_complete(struct pci_dev *pdev)
|
||||
|
||||
static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
|
||||
/* assert reset for at least 20 usec */
|
||||
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||||
@ -1490,14 +1490,14 @@ static int g4x_reset_complete(struct pci_dev *pdev)
|
||||
|
||||
static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
|
||||
return wait_for(g4x_reset_complete(pdev), 500);
|
||||
}
|
||||
|
||||
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
|
||||
{
|
||||
struct pci_dev *pdev = dev_priv->dev->pdev;
|
||||
struct pci_dev *pdev = dev_priv->drm.pdev;
|
||||
int ret;
|
||||
|
||||
pci_write_config_byte(pdev, I915_GDRST,
|
||||
|
Loading…
Reference in New Issue
Block a user