Merge tag 'drm-intel-next-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel into drm-next

Getting started with v4.15 features:

- Cannonlake workarounds (Rodrigo, Oscar)
- Infoframe refactoring and fixes to enable infoframes for DP (Ville)
- VBT definition updates (Jani)
- Sparse warning fixes (Ville, Chris)
- Crtc state usage fixes and cleanups (Ville)
- DP vswing, pre-emph and buffer translation refactoring and fixes (Rodrigo)
- Prevent IPS from interfering with CRC capture (Ville, Marta)
- Enable Mesa to advertise ARB_timer_query (Nanley)
- Refactor GT number into intel_device_info (Lionel)
- Try harder to avoid eDP DP AUX CH timeouts (Manasi)
- CDCLK check improvements (Ville)
- Restore GPU clock boost on missed pageflip vblanks (Chris)
- Fence register reservation API for vGPU (Changbin)
- First batch of CCS fixes (Ville)
- Finally, numerous GEM fixes, cleanups and improvements (Chris)

* tag 'drm-intel-next-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel: (100 commits)
  drm/i915: Update DRIVER_DATE to 20170907
  drm/i915/cnl: WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod)
  drm/i915: Lift has-pinned-pages assert to caller of ____i915_gem_object_get_pages
  drm/i915: Display WA #1133 WaFbcSkipSegments:cnl, glk
  drm/i915/cnl: Allow the reg_read ioctl to read the RCS TIMESTAMP register
  drm/i915: Move device_info.has_snoop into the static tables
  drm/i915: Disable MI_STORE_DATA_IMM for i915g/i915gm
  drm/i915: Re-enable GTT following a device reset
  drm/i915/cnp: Wa 1181: Fix Backlight issue
  drm/i915: Annotate user relocs with __user
  drm/i915: Constify load detect mode
  drm/i915/perf: Remove __user from u64 in drm_i915_perf_oa_config
  drm/i915: Silence sparse by using gfp_t
  drm/i915: io unmap functions want __iomem
  drm/i915: Add __rcu to radix tree slot pointer
  drm/i915: Wake up the device for the fbdev setup
  drm/i915: Add interface to reserve fence registers for vGPU
  drm/i915: Use correct path to trace include
  drm/i915: Fix the missing PPAT cache attributes on CNL
  drm/i915: Fix enum pipe vs. enum transcoder for the PCH transcoder
  ...
Dave Airlie committed on 2017-09-28 07:12:44 +10:00
commit 9afafdbfbf
42 changed files with 1948 additions and 1305 deletions


@ -150,5 +150,3 @@ endif
i915-y += intel_lpe_audio.o
obj-$(CONFIG_DRM_I915) += i915.o
CFLAGS_i915_trace_points.o := -I$(src)


@ -173,8 +173,8 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
@ -187,24 +187,19 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct drm_i915_fence_reg *reg;
int i;
struct list_head *pos, *q;
intel_runtime_pm_get(dev_priv);
/* Request fences from host */
mutex_lock(&dev_priv->drm.struct_mutex);
i = 0;
list_for_each_safe(pos, q, &dev_priv->mm.fence_list) {
reg = list_entry(pos, struct drm_i915_fence_reg, link);
if (reg->pin_count || reg->vma)
continue;
list_del(pos);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = i915_reserve_fence(dev_priv);
if (IS_ERR(reg))
goto out_free_fence;
vgpu->fence.regs[i] = reg;
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
_clear_vgpu_fence(vgpu);
@ -212,13 +207,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_put(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
if (!reg)
continue;
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);


@ -239,7 +239,8 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
dev_priv->pch_type = PCH_KBP;
DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n");
WARN_ON(!IS_SKYLAKE(dev_priv) &&
!IS_KABYLAKE(dev_priv));
!IS_KABYLAKE(dev_priv) &&
!IS_COFFEELAKE(dev_priv));
} else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CNP;
DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n");


@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170818"
#define DRIVER_TIMESTAMP 1503088845
#define DRIVER_DATE "20170907"
#define DRIVER_TIMESTAMP 1504772900
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@ -569,6 +569,24 @@ struct i915_hotplug {
(__i)++) \
for_each_if (plane_state)
#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_crtc && \
((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
(new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
(__i)++) \
for_each_if (crtc)
#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
(old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
(new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
(__i)++) \
for_each_if (plane)
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@ -841,6 +859,7 @@ struct intel_device_info {
u8 gen;
u16 gen_mask;
enum intel_platform platform;
u8 gt; /* GT number, 0 if undefined */
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
#define DEFINE_FLAG(name) u8 name:1
@ -1106,6 +1125,7 @@ struct intel_fbc {
} fb;
int cfb_size;
unsigned int gen9_wa_cfb_stride;
} params;
struct intel_fbc_work {
@ -1464,6 +1484,11 @@ struct i915_gem_mm {
struct llist_head free_list;
struct work_struct free_work;
/**
* Small stash of WC pages
*/
struct pagevec wc_stash;
/** Usable portion of the GTT for GEM */
dma_addr_t stolen_base; /* limited to low memory (32-bit) */
@ -1717,7 +1742,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
int child_dev_num;
union child_device_config *child_dev;
struct child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@ -2328,7 +2353,8 @@ struct drm_i915_private {
struct mutex dpll_lock;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@ -2861,9 +2887,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (INTEL_DEVID(dev_priv) == 0x0156 || \
INTEL_DEVID(dev_priv) == 0x0152 || \
INTEL_DEVID(dev_priv) == 0x015a)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
(dev_priv)->info.gt == 1)
#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
@ -2885,11 +2910,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
@ -2910,15 +2935,15 @@ intel_info(const struct drm_i915_private *dev_priv)
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0030)
(dev_priv)->info.gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0010)
(dev_priv)->info.gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
(dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
@ -3647,6 +3672,9 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
@ -4332,11 +4360,4 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
static inline bool
intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
return __intel_engine_can_store_dword(INTEL_GEN(engine->i915),
engine->class);
}
#endif
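
As a quick illustration of the old/new-state iterator macros added earlier in this header, here is a minimal sketch of how a caller might walk the planes of an atomic state. The function name and debug message are invented for illustration and are not part of this series; the types and macro come from the i915 headers above.

static void sketch_log_newly_visible_planes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	struct intel_plane_state *old_plane_state, *new_plane_state;
	int i;

	/* Visit every plane in the state, seeing both its old and new state. */
	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i) {
		if (new_plane_state->base.visible &&
		    !old_plane_state->base.visible)
			DRM_DEBUG_KMS("plane %d became visible\n", plane->id);
	}
}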


@ -1013,17 +1013,20 @@ gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void *vaddr;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = (void __force *)
io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data, vaddr + offset, length);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
length);
io_mapping_unmap(vaddr);
}
return unwritten;
@ -1189,18 +1192,18 @@ ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
{
void *vaddr;
void __iomem *vaddr;
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap_atomic(vaddr);
if (unwritten) {
vaddr = (void __force *)
io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user(vaddr + offset, user_data, length);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user((void __force *)vaddr + offset,
user_data, length);
io_mapping_unmap(vaddr);
}
@ -2476,8 +2479,6 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
struct sg_table *pages;
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
@ -2507,6 +2508,8 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return err;
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
@ -2590,6 +2593,8 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
@ -3257,11 +3262,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
struct i915_gem_context *ctx = lut->ctx;
struct i915_vma *vma;
GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
if (ctx->file_priv != fpriv)
continue;
vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
GEM_BUG_ON(vma->obj != obj);
/* We allow the process to have multiple handles to the same
@ -3375,24 +3380,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
{
return wait_for(intel_engine_is_idle(engine), timeout_ms);
}
static int wait_for_engines(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
i915_gem_set_wedged(i915);
return -EIO;
}
GEM_BUG_ON(intel_engine_get_seqno(engine) !=
intel_engine_last_submit(engine));
if (wait_for(intel_engines_are_idle(i915), 50)) {
DRM_ERROR("Failed to idle engines, declaring wedged!\n");
i915_gem_set_wedged(i915);
return -EIO;
}
return 0;
@ -4426,6 +4419,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
llist_for_each_entry_safe(obj, on, freed, freed) {
GEM_BUG_ON(obj->bind_count);
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
if (obj->ops->release)
obj->ops->release(obj);
@ -4533,6 +4527,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
if (i915_terminally_wedged(&i915->gpu_error)) {
mutex_lock(&i915->drm.struct_mutex);
i915_gem_unset_wedged(i915);
mutex_unlock(&i915->drm.struct_mutex);
}
/*
* If we inherit context state from the BIOS or earlier occupants
* of the GPU, the GPU may be in an inconsistent state when we
@ -4572,7 +4572,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
if (ret && ret != -EIO)
goto err_unlock;
assert_kernel_context_is_current(dev_priv);
@ -4594,7 +4594,8 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
WARN_ON(!intel_engines_are_idle(dev_priv));
if (WARN_ON(!intel_engines_are_idle(dev_priv)))
i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
/*
* Neither the BIOS, ourselves or any other kernel
@ -4616,11 +4617,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machine in an unusable condition.
*/
i915_gem_sanitize(dev_priv);
goto out_rpm_put;
intel_runtime_pm_put(dev_priv);
return 0;
err_unlock:
mutex_unlock(&dev->struct_mutex);
out_rpm_put:
intel_runtime_pm_put(dev_priv);
return ret;
}


@ -268,6 +268,11 @@ static inline u64 gen8_noncanonical_addr(u64 address)
return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
}
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return eb->engine->needs_cmd_parser && eb->batch_len;
}
static int eb_create(struct i915_execbuffer *eb)
{
if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
@ -1159,6 +1164,13 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
if (unlikely(!cache->rq)) {
int err;
/* If we need to copy for the cmdparser, we will stall anyway */
if (eb_use_cmdparser(eb))
return ERR_PTR(-EWOULDBLOCK);
if (!intel_engine_can_store_dword(eb->engine))
return ERR_PTR(-ENODEV);
err = __reloc_gpu_alloc(eb, vma, len);
if (unlikely(err))
return ERR_PTR(err);
@ -1183,9 +1195,7 @@ relocate_entry(struct i915_vma *vma,
if (!eb->reloc_cache.vaddr &&
(DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
!reservation_object_test_signaled_rcu(vma->resv, true)) &&
__intel_engine_can_store_dword(eb->reloc_cache.gen,
eb->engine->class)) {
!reservation_object_test_signaled_rcu(vma->resv, true))) {
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
u32 *batch;
@ -2291,7 +2301,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
if (eb.engine->needs_cmd_parser && eb.batch_len) {
if (eb_use_cmdparser(&eb)) {
struct i915_vma *vma;
vma = eb_parse(&eb, drm_is_current_master(file));


@ -359,6 +359,57 @@ i915_vma_get_fence(struct i915_vma *vma)
return fence_update(fence, set);
}
/**
* i915_reserve_fence - Reserve a fence for vGPU
* @dev_priv: i915 device private
*
* This function walks the fence regs looking for a free one and removes
* it from the fence_list. It is used to reserve a fence for vGPU to use.
*/
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv)
{
struct drm_i915_fence_reg *fence;
int count;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
/* Keep at least one fence available for the display engine. */
count = 0;
list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
count += !fence->pin_count;
if (count <= 1)
return ERR_PTR(-ENOSPC);
fence = fence_find(dev_priv);
if (IS_ERR(fence))
return fence;
if (fence->vma) {
/* Force-remove fence from VMA */
ret = fence_update(fence, NULL);
if (ret)
return ERR_PTR(ret);
}
list_del(&fence->link);
return fence;
}
/**
* i915_unreserve_fence - Reclaim a reserved fence
* @fence: the fence reg
*
* This function adds a fence register previously reserved for vGPU back to the fence_list.
*/
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
lockdep_assert_held(&fence->i915->drm.struct_mutex);
list_add(&fence->link, &fence->i915->mm.fence_list);
}
/**
* i915_gem_revoke_fences - revoke fence state
* @dev_priv: i915 device private
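
To make the calling convention of i915_reserve_fence()/i915_unreserve_fence() above concrete, a minimal sketch follows; the wrapper names are invented, error handling is simplified, and struct_mutex is assumed to be the only locking required, as the lockdep assertions above suggest. This is illustrative only, not part of the patch.

static int example_grab_one_fence(struct drm_i915_private *dev_priv,
				  struct drm_i915_fence_reg **out)
{
	struct drm_i915_fence_reg *reg;

	/* The reservation API must be called with struct_mutex held. */
	mutex_lock(&dev_priv->drm.struct_mutex);
	reg = i915_reserve_fence(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	*out = reg;
	return 0;
}

static void example_put_fence(struct drm_i915_private *dev_priv,
			      struct drm_i915_fence_reg *reg)
{
	/* Returning the fence puts it back on dev_priv->mm.fence_list. */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_unreserve_fence(reg);
	mutex_unlock(&dev_priv->drm.struct_mutex);
}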


@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
struct page *page;
struct pagevec *pvec = &vm->free_pages;
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
i915_gem_shrink_all(vm->i915);
if (vm->free_pages.nr)
return vm->free_pages.pages[--vm->free_pages.nr];
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
page = alloc_page(gfp);
if (!page)
if (!vm->pt_kmap_wc)
return alloc_page(gfp);
/* A placeholder for a specific mutex to guard the WC stash */
lockdep_assert_held(&vm->i915->drm.struct_mutex);
/* Look in our global stash of WC pages... */
pvec = &vm->i915->mm.wc_stash;
if (likely(pvec->nr))
return pvec->pages[--pvec->nr];
/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
do {
struct page *page;
page = alloc_page(gfp);
if (unlikely(!page))
break;
pvec->pages[pvec->nr++] = page;
} while (pagevec_space(pvec));
if (unlikely(!pvec->nr))
return NULL;
if (vm->pt_kmap_wc)
set_pages_array_wc(&page, 1);
set_pages_array_wc(pvec->pages, pvec->nr);
return page;
return pvec->pages[--pvec->nr];
}
static void vm_free_pages_release(struct i915_address_space *vm)
static void vm_free_pages_release(struct i915_address_space *vm,
bool immediate)
{
GEM_BUG_ON(!pagevec_count(&vm->free_pages));
struct pagevec *pvec = &vm->free_pages;
if (vm->pt_kmap_wc)
set_pages_array_wb(vm->free_pages.pages,
pagevec_count(&vm->free_pages));
GEM_BUG_ON(!pagevec_count(pvec));
__pagevec_release(&vm->free_pages);
if (vm->pt_kmap_wc) {
struct pagevec *stash = &vm->i915->mm.wc_stash;
/* When we use WC, first fill up the global stash and then
* only if full immediately free the overflow.
*/
lockdep_assert_held(&vm->i915->drm.struct_mutex);
if (pagevec_space(stash)) {
do {
stash->pages[stash->nr++] =
pvec->pages[--pvec->nr];
if (!pvec->nr)
return;
} while (pagevec_space(stash));
/* As we have made some room in the VM's free_pages,
* we can wait for it to fill again. Unless we are
* inside i915_address_space_fini() and must
* immediately release the pages!
*/
if (!immediate)
return;
}
set_pages_array_wb(pvec->pages, pvec->nr);
}
__pagevec_release(pvec);
}
static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
if (!pagevec_add(&vm->free_pages, page))
vm_free_pages_release(vm);
vm_free_pages_release(vm, false);
}
static int __setup_page_dma(struct i915_address_space *vm,
@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
struct page *page;
dma_addr_t addr;
page = alloc_page(gfp | __GFP_ZERO);
if (unlikely(!page))
return -ENOMEM;
addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(vm->dma, addr))) {
__free_page(page);
return -ENOMEM;
}
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
return 0;
}
static void cleanup_scratch_page(struct i915_address_space *vm)
{
cleanup_page_dma(vm, &vm->scratch_page);
struct i915_page_dma *p = &vm->scratch_page;
dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
__free_page(p->page);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1ULL << 48 :
1ULL << 32;
ret = gen8_init_scratch(&ppgtt->base);
if (ret) {
ppgtt->base.total = 0;
return ret;
}
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
*/
if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
ppgtt->base.pt_kmap_wc = true;
ret = gen8_init_scratch(&ppgtt->base);
if (ret) {
ppgtt->base.total = 0;
return ret;
}
if (use_4lvl(vm)) {
ret = setup_px(&ppgtt->base, &ppgtt->pml4);
if (ret)
@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
static void i915_address_space_fini(struct i915_address_space *vm)
{
if (pagevec_count(&vm->free_pages))
vm_free_pages_release(vm);
vm_free_pages_release(vm, true);
i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
@ -1885,12 +1951,12 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
else if (IS_GEN9_BC(dev_priv))
else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma, *vn;
struct pagevec *pvec;
ggtt->base.closed = true;
@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
}
ggtt->base.cleanup(&ggtt->base);
pvec = &dev_priv->mm.wc_stash;
if (pvec->nr) {
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
}
mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
@ -2716,13 +2790,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
/*
* On BXT writes larger than 64 bit to the GTT pagetable range will be
* dropped. For WC mappings in general we have 64 byte burst writes
* when the WC buffer is flushed, so we can't use it, but have to
* On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
* will be dropped. For WC mappings in general we have 64 byte burst
* writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
*/
if (IS_GEN9_LP(dev_priv))
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
ggtt->gsm = ioremap_nocache(phys_addr, size);
else
ggtt->gsm = ioremap_wc(phys_addr, size);
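
The WC page stash added earlier in this file boils down to a small batch-filled page cache; the condensed sketch below shows that discipline in isolation. The helper name is invented, locking and the per-VM free_pages layer are omitted, and this is not the actual code.

static struct page *sketch_wc_stash_pop(struct pagevec *stash, gfp_t gfp)
{
	/* Pop from the cache first; only refill when it runs dry. */
	if (pagevec_count(stash))
		return stash->pages[--stash->nr];

	/* Refill in a batch so set_pages_array_wc() is amortized. */
	while (pagevec_space(stash)) {
		struct page *page = alloc_page(gfp);

		if (!page)
			break;
		stash->pages[stash->nr++] = page;
	}
	if (!pagevec_count(stash))
		return NULL;

	/* Convert the whole batch to write-combining in one call. */
	set_pages_array_wc(stash->pages, stash->nr);
	return stash->pages[--stash->nr];
}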


@ -336,7 +336,7 @@ void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
__gen6_mask_pm_irq(dev_priv, mask);
}
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
@ -347,7 +347,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
POSTING_READ(reg);
}
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@ -357,7 +357,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@ -405,7 +405,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
synchronize_irq(dev_priv->drm.irq);
/* Now that we will not be generating any more work, flush any
* outsanding tasks. As we are called on the RPS idle path,
* outstanding tasks. As we are called on the RPS idle path,
* we will reset the GPU to minimum frequencies, so the current
* state of the worker can be discarded.
*/


@ -63,22 +63,23 @@
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_i830_info = {
static const struct intel_device_info intel_i830_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I830,
.is_mobile = 1, .cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
static const struct intel_device_info intel_i845g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I845G,
};
static const struct intel_device_info intel_i85x_info = {
static const struct intel_device_info intel_i85x_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I85X, .is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
@ -86,7 +87,7 @@ static const struct intel_device_info intel_i85x_info = {
.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
static const struct intel_device_info intel_i865g_info __initconst = {
GEN2_FEATURES,
.platform = INTEL_I865G,
};
@ -95,10 +96,11 @@ static const struct intel_device_info intel_i865g_info = {
.gen = 3, .num_pipes = 2, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_i915g_info = {
static const struct intel_device_info intel_i915g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
@ -106,7 +108,7 @@ static const struct intel_device_info intel_i915g_info = {
.unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i915gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I915GM,
.is_mobile = 1,
@ -118,7 +120,7 @@ static const struct intel_device_info intel_i915gm_info = {
.unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945g_info = {
static const struct intel_device_info intel_i945g_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945G,
.has_hotplug = 1, .cursor_needs_physical = 1,
@ -127,7 +129,7 @@ static const struct intel_device_info intel_i945g_info = {
.unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_i945gm_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_I945GM, .is_mobile = 1,
.has_hotplug = 1, .cursor_needs_physical = 1,
@ -138,14 +140,14 @@ static const struct intel_device_info intel_i945gm_info = {
.unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_g33_info = {
static const struct intel_device_info intel_g33_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_G33,
.has_hotplug = 1,
.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_pineview_info __initconst = {
GEN3_FEATURES,
.platform = INTEL_PINEVIEW, .is_mobile = 1,
.has_hotplug = 1,
@ -157,17 +159,18 @@ static const struct intel_device_info intel_pineview_info = {
.has_hotplug = 1, \
.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965g_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965G,
.has_overlay = 1,
.hws_needs_physical = 1,
};
static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_i965gm_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_I965GM,
.is_mobile = 1, .has_fbc = 1,
@ -176,14 +179,14 @@ static const struct intel_device_info intel_i965gm_info = {
.hws_needs_physical = 1,
};
static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_g45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_G45,
.has_pipe_cxsr = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
static const struct intel_device_info intel_gm45_info = {
static const struct intel_device_info intel_gm45_info __initconst = {
GEN4_FEATURES,
.platform = INTEL_GM45,
.is_mobile = 1, .has_fbc = 1,
@ -197,15 +200,16 @@ static const struct intel_device_info intel_gm45_info = {
.has_hotplug = 1, \
.has_gmbus_irq = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_d_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
};
static const struct intel_device_info intel_ironlake_m_info = {
static const struct intel_device_info intel_ironlake_m_info __initconst = {
GEN5_FEATURES,
.platform = INTEL_IRONLAKE,
.is_mobile = 1, .has_fbc = 1,
@ -224,15 +228,34 @@ static const struct intel_device_info intel_ironlake_m_info = {
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
static const struct intel_device_info intel_sandybridge_d_info = {
GEN6_FEATURES,
.platform = INTEL_SANDYBRIDGE,
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
.platform = INTEL_SANDYBRIDGE
static const struct intel_device_info intel_sandybridge_d_gt1_info __initconst = {
SNB_D_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
GEN6_FEATURES,
.platform = INTEL_SANDYBRIDGE,
.is_mobile = 1,
static const struct intel_device_info intel_sandybridge_d_gt2_info __initconst = {
SNB_D_PLATFORM,
.gt = 2,
};
#define SNB_M_PLATFORM \
GEN6_FEATURES, \
.platform = INTEL_SANDYBRIDGE, \
.is_mobile = 1
static const struct intel_device_info intel_sandybridge_m_gt1_info __initconst = {
SNB_M_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_sandybridge_m_gt2_info __initconst = {
SNB_M_PLATFORM,
.gt = 2,
};
#define GEN7_FEATURES \
@ -249,27 +272,46 @@ static const struct intel_device_info intel_sandybridge_m_info = {
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS
static const struct intel_device_info intel_ivybridge_d_info = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
.has_l3_dpf = 1,
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
.platform = INTEL_IVYBRIDGE, \
.has_l3_dpf = 1
static const struct intel_device_info intel_ivybridge_d_gt1_info __initconst = {
IVB_D_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
.is_mobile = 1,
.has_l3_dpf = 1,
static const struct intel_device_info intel_ivybridge_d_gt2_info __initconst = {
IVB_D_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_ivybridge_q_info = {
#define IVB_M_PLATFORM \
GEN7_FEATURES, \
.platform = INTEL_IVYBRIDGE, \
.is_mobile = 1, \
.has_l3_dpf = 1
static const struct intel_device_info intel_ivybridge_m_gt1_info __initconst = {
IVB_M_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_ivybridge_m_gt2_info __initconst = {
IVB_M_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_ivybridge_q_info __initconst = {
GEN7_FEATURES,
.platform = INTEL_IVYBRIDGE,
.gt = 2,
.num_pipes = 0, /* legal, last one wins */
.has_l3_dpf = 1,
};
static const struct intel_device_info intel_valleyview_info = {
static const struct intel_device_info intel_valleyview_info __initconst = {
.platform = INTEL_VALLEYVIEW,
.gen = 7,
.is_lp = 1,
@ -282,6 +324,7 @@ static const struct intel_device_info intel_valleyview_info = {
.has_hotplug = 1,
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.has_snoop = true,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_DEFAULT_PIPEOFFSETS,
@ -299,10 +342,24 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6p = 0 /* RC6p removed-by HSW */, \
.has_runtime_pm = 1
static const struct intel_device_info intel_haswell_info = {
HSW_FEATURES,
.platform = INTEL_HASWELL,
.has_l3_dpf = 1,
#define HSW_PLATFORM \
HSW_FEATURES, \
.platform = INTEL_HASWELL, \
.has_l3_dpf = 1
static const struct intel_device_info intel_haswell_gt1_info __initconst = {
HSW_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_haswell_gt2_info __initconst = {
HSW_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_haswell_gt3_info __initconst = {
HSW_PLATFORM,
.gt = 3,
};
#define BDW_FEATURES \
@ -318,16 +375,31 @@ static const struct intel_device_info intel_haswell_info = {
.gen = 8, \
.platform = INTEL_BROADWELL
static const struct intel_device_info intel_broadwell_info = {
static const struct intel_device_info intel_broadwell_gt1_info __initconst = {
BDW_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_broadwell_gt3_info = {
static const struct intel_device_info intel_broadwell_gt2_info __initconst = {
BDW_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_broadwell_rsvd_info __initconst = {
BDW_PLATFORM,
.gt = 3,
/* According to the device ID those devices are GT3, they were
* previously treated as not GT3, keep it like that.
*/
};
static const struct intel_device_info intel_broadwell_gt3_info __initconst = {
BDW_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cherryview_info = {
static const struct intel_device_info intel_cherryview_info __initconst = {
.gen = 8, .num_pipes = 3,
.has_hotplug = 1,
.is_lp = 1,
@ -344,6 +416,7 @@ static const struct intel_device_info intel_cherryview_info = {
.has_aliasing_ppgtt = 1,
.has_full_ppgtt = 1,
.has_reset_engine = 1,
.has_snoop = true,
.display_mmio_offset = VLV_DISPLAY_BASE,
GEN_CHV_PIPEOFFSETS,
CURSOR_OFFSETS,
@ -358,13 +431,29 @@ static const struct intel_device_info intel_cherryview_info = {
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_skylake_info = {
static const struct intel_device_info intel_skylake_gt1_info __initconst = {
SKL_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_skylake_gt3_info = {
static const struct intel_device_info intel_skylake_gt2_info __initconst = {
SKL_PLATFORM,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
.gt = 2,
};
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
static const struct intel_device_info intel_skylake_gt3_info __initconst = {
SKL_GT3_PLUS_PLATFORM,
.gt = 3,
};
static const struct intel_device_info intel_skylake_gt4_info __initconst = {
SKL_GT3_PLUS_PLATFORM,
.gt = 4,
};
#define GEN9_LP_FEATURES \
@ -390,18 +479,18 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_full_ppgtt = 1, \
.has_full_48bit_ppgtt = 1, \
.has_reset_engine = 1, \
.has_snoop = true, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
static const struct intel_device_info intel_broxton_info = {
static const struct intel_device_info intel_broxton_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_BROXTON,
.ddb_size = 512,
.has_reset_engine = false,
};
static const struct intel_device_info intel_geminilake_info = {
static const struct intel_device_info intel_geminilake_info __initconst = {
GEN9_LP_FEATURES,
.platform = INTEL_GEMINILAKE,
.ddb_size = 1024,
@ -416,12 +505,19 @@ static const struct intel_device_info intel_geminilake_info = {
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_kabylake_info = {
static const struct intel_device_info intel_kabylake_gt1_info __initconst = {
KBL_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
static const struct intel_device_info intel_kabylake_gt2_info __initconst = {
KBL_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_kabylake_gt3_info __initconst = {
KBL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
@ -434,20 +530,28 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
.has_guc = 1, \
.ddb_size = 896
static const struct intel_device_info intel_coffeelake_info = {
static const struct intel_device_info intel_coffeelake_gt1_info __initconst = {
CFL_PLATFORM,
.gt = 1,
};
static const struct intel_device_info intel_coffeelake_gt3_info = {
static const struct intel_device_info intel_coffeelake_gt2_info __initconst = {
CFL_PLATFORM,
.gt = 2,
};
static const struct intel_device_info intel_coffeelake_gt3_info __initconst = {
CFL_PLATFORM,
.gt = 3,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
static const struct intel_device_info intel_cannonlake_info = {
static const struct intel_device_info intel_cannonlake_gt2_info __initconst = {
BDW_FEATURES,
.is_alpha_support = 1,
.platform = INTEL_CANNONLAKE,
.gen = 10,
.gt = 2,
.ddb_size = 1024,
.has_csr = 1,
.color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
@ -476,31 +580,40 @@ static const struct pci_device_id pciidlist[] = {
INTEL_PINEVIEW_IDS(&intel_pineview_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
INTEL_SNB_D_GT2_IDS(&intel_sandybridge_d_gt2_info),
INTEL_SNB_M_GT1_IDS(&intel_sandybridge_m_gt1_info),
INTEL_SNB_M_GT2_IDS(&intel_sandybridge_m_gt2_info),
INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
INTEL_HSW_IDS(&intel_haswell_info),
INTEL_IVB_M_GT1_IDS(&intel_ivybridge_m_gt1_info),
INTEL_IVB_M_GT2_IDS(&intel_ivybridge_m_gt2_info),
INTEL_IVB_D_GT1_IDS(&intel_ivybridge_d_gt1_info),
INTEL_IVB_D_GT2_IDS(&intel_ivybridge_d_gt2_info),
INTEL_HSW_GT1_IDS(&intel_haswell_gt1_info),
INTEL_HSW_GT2_IDS(&intel_haswell_gt2_info),
INTEL_HSW_GT3_IDS(&intel_haswell_gt3_info),
INTEL_VLV_IDS(&intel_valleyview_info),
INTEL_BDW_GT12_IDS(&intel_broadwell_info),
INTEL_BDW_GT1_IDS(&intel_broadwell_gt1_info),
INTEL_BDW_GT2_IDS(&intel_broadwell_gt2_info),
INTEL_BDW_GT3_IDS(&intel_broadwell_gt3_info),
INTEL_BDW_RSVD_IDS(&intel_broadwell_info),
INTEL_BDW_RSVD_IDS(&intel_broadwell_rsvd_info),
INTEL_CHV_IDS(&intel_cherryview_info),
INTEL_SKL_GT1_IDS(&intel_skylake_info),
INTEL_SKL_GT2_IDS(&intel_skylake_info),
INTEL_SKL_GT1_IDS(&intel_skylake_gt1_info),
INTEL_SKL_GT2_IDS(&intel_skylake_gt2_info),
INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
INTEL_SKL_GT4_IDS(&intel_skylake_gt4_info),
INTEL_BXT_IDS(&intel_broxton_info),
INTEL_GLK_IDS(&intel_geminilake_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_info),
INTEL_KBL_GT1_IDS(&intel_kabylake_gt1_info),
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
INTEL_CFL_S_IDS(&intel_coffeelake_info),
INTEL_CFL_H_IDS(&intel_coffeelake_info),
INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);


@ -2373,6 +2373,7 @@ enum i915_power_well_id {
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1<<24)
#if 0
#define PRB0_TAIL _MMIO(0x2030)
@ -2491,6 +2492,7 @@ enum i915_power_well_id {
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
#define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
#define _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH (1 << 1) /* gen6 */
@ -2938,6 +2940,9 @@ enum i915_power_well_id {
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
#define ILK_DPFC_DISABLE_DUMMY0 (1<<8)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1<<23)
#define GLK_SKIP_SEG_EN (1<<12)
#define GLK_SKIP_SEG_COUNT_MASK (3<<10)
#define GLK_SKIP_SEG_COUNT(x) ((x)<<10)
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1<<0)
#define SNB_FBC_FRONT_BUFFER (1<<1)
@ -3806,6 +3811,12 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
/*
* GEN10 clock gating regs
*/
#define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4)
#define SARBUNIT_CLKGATE_DIS (1 << 5)
/*
* Display engine regs
*/
@ -6916,6 +6927,10 @@ enum {
#define GLK_CL1_PWR_DOWN (1 << 11)
#define GLK_CL0_PWR_DOWN (1 << 10)
#define CHICKEN_MISC_4 _MMIO(0x4208c)
#define FBC_STRIDE_OVERRIDE (1 << 13)
#define FBC_STRIDE_MASK 0x1FFF
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@ -7017,6 +7032,7 @@ enum {
/* GEN8 chicken */
#define HDC_CHICKEN0 _MMIO(0x7300)
#define CNL_HDC_CHICKEN0 _MMIO(0xE5F0)
#define HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE (1<<15)
#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
@ -7470,6 +7486,7 @@ enum {
#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
#define CNP_PWM_CGE_GATING_DISABLE (1<<13)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
/* CPU: FDI_TX */
@ -8044,10 +8061,12 @@ enum {
#define FLOW_CONTROL_ENABLE (1<<15)
#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
#define STALL_DOP_GATING_DISABLE (1<<5)
#define THROTTLE_12_5 (7<<2)
#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1<<0)
#define PUSH_CONSTANT_DEREF_DISABLE (1<<8)
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@ -8059,6 +8078,7 @@ enum {
#define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5)
#define CNL_FAST_ANISO_L1_BANKING_FIX (1<<4)
#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
#define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194)


@ -1031,5 +1031,5 @@ TRACE_EVENT(switch_mm,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/i915
#include <trace/define_trace.h>


@ -107,7 +107,9 @@ intel_plane_destroy_state(struct drm_plane *plane,
drm_atomic_helper_plane_destroy_state(plane, state);
}
int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state)
{
struct drm_plane *plane = intel_state->base.plane;
@ -124,7 +126,7 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
* anything driver-specific we need to test in that case, so
* just return success.
*/
if (!intel_state->base.crtc && !plane->state->crtc)
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
/* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */
@ -194,16 +196,21 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
else
crtc_state->active_planes &= ~BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
old_plane_state,
state);
}
static int intel_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_plane_state *new_plane_state)
{
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *drm_crtc_state;
crtc = crtc ? crtc : plane->state->crtc;
struct drm_atomic_state *state = new_plane_state->state;
const struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
/*
* Both crtc and plane->crtc could be NULL if we're updating a
@ -214,29 +221,33 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (WARN_ON(!drm_crtc_state))
return -EINVAL;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state),
to_intel_plane_state(state));
return intel_plane_atomic_check_with_state(to_intel_crtc_state(old_crtc_state),
to_intel_crtc_state(new_crtc_state),
to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
}
static void intel_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state =
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
const struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, intel_plane);
struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
if (new_plane_state->base.visible) {
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
if (intel_state->base.visible) {
trace_intel_update_plane(plane,
to_intel_crtc(crtc));
intel_plane->update_plane(intel_plane,
to_intel_crtc_state(crtc->state),
intel_state);
new_crtc_state, new_plane_state);
} else {
trace_intel_disable_plane(plane,
to_intel_crtc(crtc));


@ -452,24 +452,24 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
}
static const union child_device_config *
child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
static const struct child_device_config *
child_device_ptr(const struct bdb_general_definitions *defs, int i)
{
return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
return (const void *) &defs->devices[i * defs->child_dev_size];
}
static void
parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
struct sdvo_device_mapping *p_mapping;
const struct bdb_general_definitions *p_defs;
const struct old_child_dev_config *child; /* legacy */
struct sdvo_device_mapping *mapping;
const struct bdb_general_definitions *defs;
const struct child_device_config *child;
int i, child_device_num, count;
u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
return;
}
@ -479,18 +479,17 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
* device size matches that of the *legacy* child device config
* struct. Thus, SDVO mapping will be skipped for newer VBT.
*/
if (p_defs->child_dev_size != sizeof(*child)) {
if (defs->child_dev_size != LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
return;
}
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
block_size = get_blocksize(defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
p_defs->child_dev_size;
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
child = &child_device_ptr(p_defs, i)->old;
child = child_device_ptr(defs, i);
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
@ -514,20 +513,20 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
child->slave_addr,
(child->dvo_port == DEVICE_PORT_DVOB) ?
"SDVOB" : "SDVOC");
p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!p_mapping->initialized) {
p_mapping->dvo_port = child->dvo_port;
p_mapping->slave_addr = child->slave_addr;
p_mapping->dvo_wiring = child->dvo_wiring;
p_mapping->ddc_pin = child->ddc_pin;
p_mapping->i2c_pin = child->i2c_pin;
p_mapping->initialized = 1;
mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
mapping->slave_addr = child->slave_addr;
mapping->dvo_wiring = child->dvo_wiring;
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
p_mapping->dvo_port,
p_mapping->slave_addr,
p_mapping->dvo_wiring,
p_mapping->ddc_pin,
p_mapping->i2c_pin);
mapping->dvo_port,
mapping->slave_addr,
mapping->dvo_wiring,
mapping->ddc_pin,
mapping->i2c_pin);
} else {
DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
"two SDVO device.\n");
@ -577,7 +576,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
const struct edp_link_params *edp_link_params;
const struct edp_fast_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
@ -601,7 +600,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Get the eDP sequencing and link info */
edp_pps = &edp->power_seqs[panel_type];
edp_link_params = &edp->link_params[panel_type];
edp_link_params = &edp->fast_link_params[panel_type];
dev_priv->vbt.edp.pps = *edp_pps;
@ -1113,7 +1112,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
union child_device_config *it, *child = NULL;
struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
uint8_t hdmi_level_shift;
int i, j;
@ -1141,7 +1140,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (dvo_ports[port][j] == -1)
break;
if (it->common.dvo_port == dvo_ports[port][j]) {
if (it->dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@ -1154,14 +1153,14 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
aux_channel = child->common.aux_channel;
ddc_pin = child->common.ddc_pin;
aux_channel = child->aux_channel;
ddc_pin = child->ddc_pin;
is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
@ -1210,7 +1209,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (bdb->version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
hdmi_level_shift = child->raw[7] & 0xF;
hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
@ -1218,11 +1217,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
if (bdb->version >= 196 && child->common.iboost) {
info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
if (bdb->version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
@ -1250,15 +1249,15 @@ static void
parse_device_mapping(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *p_defs;
const union child_device_config *p_child;
union child_device_config *child_dev_ptr;
const struct bdb_general_definitions *defs;
const struct child_device_config *child;
struct child_device_config *child_dev_ptr;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!p_defs) {
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
return;
}
@ -1267,41 +1266,39 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
} else if (bdb->version < 111) {
expected_size = 27;
} else if (bdb->version < 195) {
BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
expected_size = sizeof(struct old_child_dev_config);
expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
} else if (bdb->version <= 197) {
expected_size = 38;
} else {
expected_size = 38;
BUILD_BUG_ON(sizeof(*p_child) < 38);
BUILD_BUG_ON(sizeof(*child) < 38);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (p_defs->child_dev_size != expected_size)
if (defs->child_dev_size != expected_size)
DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
p_defs->child_dev_size, expected_size, bdb->version);
defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
p_defs->child_dev_size);
defs->child_dev_size);
return;
}
/* get the block size of general definitions */
block_size = get_blocksize(p_defs);
block_size = get_blocksize(defs);
/* get the number of child device */
child_device_num = (block_size - sizeof(*p_defs)) /
p_defs->child_dev_size;
child_device_num = (block_size - sizeof(*defs)) / defs->child_dev_size;
count = 0;
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
child = child_device_ptr(defs, i);
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
@ -1311,7 +1308,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->vbt.child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
dev_priv->vbt.child_dev = kcalloc(count, sizeof(*child), GFP_KERNEL);
if (!dev_priv->vbt.child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
@ -1320,8 +1317,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
dev_priv->vbt.child_dev_num = count;
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = child_device_ptr(p_defs, i);
if (!p_child->common.device_type) {
child = child_device_ptr(defs, i);
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
@ -1334,8 +1331,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
* (child_dev_size) of the child device. Accessing the data must
* depend on VBT version.
*/
memcpy(child_dev_ptr, p_child,
min_t(size_t, p_defs->child_dev_size, sizeof(*p_child)));
memcpy(child_dev_ptr, child,
min_t(size_t, defs->child_dev_size, sizeof(*child)));
/*
* copied full block, now init values when they are not
@ -1343,12 +1340,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/
if (bdb->version < 196) {
/* Set default values for bits added from v196 */
child_dev_ptr->common.iboost = 0;
child_dev_ptr->common.hpd_invert = 0;
child_dev_ptr->iboost = 0;
child_dev_ptr->hpd_invert = 0;
}
if (bdb->version < 192)
child_dev_ptr->common.lspcon = 0;
child_dev_ptr->lspcon = 0;
}
return;
}
@ -1559,7 +1556,7 @@ out:
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
union child_device_config *p_child;
const struct child_device_config *child;
int i;
if (!dev_priv->vbt.int_tv_support)
@ -1569,11 +1566,11 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
switch (p_child->old.device_type) {
switch (child->device_type) {
case DEVICE_TYPE_INT_TV:
case DEVICE_TYPE_TV:
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
@ -1584,7 +1581,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
if (p_child->old.addin_offset)
if (child->addin_offset)
return true;
}
@ -1601,14 +1598,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
const struct child_device_config *child;
int i;
if (!dev_priv->vbt.child_dev_num)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
union child_device_config *uchild = dev_priv->vbt.child_dev + i;
struct old_child_dev_config *child = &uchild->old;
child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@ -1650,6 +1647,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
@ -1668,12 +1666,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
if ((p_child->common.dvo_port == port_mapping[port].dp ||
p_child->common.dvo_port == port_mapping[port].hdmi) &&
(p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
child = dev_priv->vbt.child_dev + i;
if ((child->dvo_port == port_mapping[port].dp ||
child->dvo_port == port_mapping[port].hdmi) &&
(child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
@ -1689,7 +1687,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
union child_device_config *p_child;
const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
[PORT_C] = DVO_PORT_DPC,
@ -1705,10 +1703,10 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
child = dev_priv->vbt.child_dev + i;
if (p_child->common.dvo_port == port_mapping[port] &&
(p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
if (child->dvo_port == port_mapping[port] &&
(child->device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
@ -1716,7 +1714,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
enum port port)
{
static const struct {
@ -1735,16 +1733,16 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
(DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
if (p_child->common.dvo_port == port_mapping[port].dp)
if (child->dvo_port == port_mapping[port].dp)
return true;
/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
if (p_child->common.dvo_port == port_mapping[port].hdmi &&
p_child->common.aux_channel != 0)
if (child->dvo_port == port_mapping[port].hdmi &&
child->aux_channel != 0)
return true;
return false;
@ -1753,13 +1751,13 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
const struct child_device_config *child;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
const union child_device_config *p_child =
&dev_priv->vbt.child_dev[i];
child = dev_priv->vbt.child_dev + i;
if (child_dev_is_dp_dual_mode(p_child, port))
if (child_dev_is_dp_dual_mode(child, port))
return true;
}
@ -1776,17 +1774,17 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
union child_device_config *p_child;
const struct child_device_config *child;
u8 dvo_port;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
p_child = dev_priv->vbt.child_dev + i;
child = dev_priv->vbt.child_dev + i;
if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
dvo_port = p_child->common.dvo_port;
dvo_port = child->dvo_port;
switch (dvo_port) {
case DVO_PORT_MIPIA:
@ -1816,16 +1814,19 @@ bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
const struct child_device_config *child;
int i;
if (WARN_ON_ONCE(!IS_GEN9_LP(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
child = dev_priv->vbt.child_dev + i;
if (!child->hpd_invert)
continue;
switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
@ -1860,16 +1861,19 @@ bool
intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port)
{
const struct child_device_config *child;
int i;
if (!HAS_LSPCON(dev_priv))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
if (!dev_priv->vbt.child_dev[i].common.lspcon)
child = dev_priv->vbt.child_dev + i;
if (!child->lspcon)
continue;
switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
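
An aside on the parse_device_mapping() changes above: the copy is deliberately size-tolerant so that both older and newer VBTs parse. Below is a minimal stand-alone sketch of the same pattern, using simplified stand-in fields rather than the real child_device_config layout; only the min()-of-sizes copy and the version-gated defaults mirror the diff.

/*
 * Illustrative sketch only, not the driver's structures: copy no more than
 * the smaller of the BIOS-declared child_dev_size and our own struct, then
 * zero the fields that only exist in newer VBT versions.
 */
#include <string.h>

struct demo_child_config {		/* stand-in for child_device_config */
	unsigned short device_type;
	unsigned char dvo_port;
	unsigned char iboost;		/* only meaningful from VBT 196 on */
	unsigned char hpd_invert;	/* only meaningful from VBT 196 on */
	unsigned char lspcon;		/* only meaningful from VBT 192 on */
};

static void demo_copy_child(struct demo_child_config *dst,
			    const void *raw, size_t child_dev_size,
			    unsigned int bdb_version)
{
	size_t n = child_dev_size < sizeof(*dst) ? child_dev_size : sizeof(*dst);

	memset(dst, 0, sizeof(*dst));
	memcpy(dst, raw, n);		/* never read past either side */

	if (bdb_version < 196) {	/* fields added in v196 */
		dst->iboost = 0;
		dst->hpd_invert = 0;
	}
	if (bdb_version < 192)
		dst->lspcon = 0;
}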

View File

@ -417,24 +417,21 @@ static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->cdclk = 540000;
}
static int vlv_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
333333 : 320000;
int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
/*
* We seem to get an unstable or solid color picture at 200MHz.
* Not sure what's wrong. For now use 200MHz only when all pipes
* are off.
*/
if (!IS_CHERRYVIEW(dev_priv) &&
max_pixclk > freq_320*limit/100)
if (IS_VALLEYVIEW(dev_priv) && min_cdclk > freq_320)
return 400000;
else if (max_pixclk > 266667*limit/100)
else if (min_cdclk > 266667)
return freq_320;
else if (max_pixclk > 0)
else if (min_cdclk > 0)
return 266667;
else
return 200000;
@ -612,13 +609,13 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
static int bdw_calc_cdclk(int max_pixclk)
static int bdw_calc_cdclk(int min_cdclk)
{
if (max_pixclk > 540000)
if (min_cdclk > 540000)
return 675000;
else if (max_pixclk > 450000)
else if (min_cdclk > 450000)
return 540000;
else if (max_pixclk > 337500)
else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@ -724,23 +721,23 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
cdclk, dev_priv->cdclk.hw.cdclk);
}
static int skl_calc_cdclk(int max_pixclk, int vco)
static int skl_calc_cdclk(int min_cdclk, int vco)
{
if (vco == 8640000) {
if (max_pixclk > 540000)
if (min_cdclk > 540000)
return 617143;
else if (max_pixclk > 432000)
else if (min_cdclk > 432000)
return 540000;
else if (max_pixclk > 308571)
else if (min_cdclk > 308571)
return 432000;
else
return 308571;
} else {
if (max_pixclk > 540000)
if (min_cdclk > 540000)
return 675000;
else if (max_pixclk > 450000)
else if (min_cdclk > 450000)
return 540000;
else if (max_pixclk > 337500)
else if (min_cdclk > 337500)
return 450000;
else
return 337500;
@ -1075,31 +1072,25 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
skl_set_cdclk(dev_priv, &cdclk_state);
}
static int bxt_calc_cdclk(int max_pixclk)
static int bxt_calc_cdclk(int min_cdclk)
{
if (max_pixclk > 576000)
if (min_cdclk > 576000)
return 624000;
else if (max_pixclk > 384000)
else if (min_cdclk > 384000)
return 576000;
else if (max_pixclk > 288000)
else if (min_cdclk > 288000)
return 384000;
else if (max_pixclk > 144000)
else if (min_cdclk > 144000)
return 288000;
else
return 144000;
}
static int glk_calc_cdclk(int max_pixclk)
static int glk_calc_cdclk(int min_cdclk)
{
/*
* FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
* as a temporary workaround. Use a higher cdclk instead. (Note that
* intel_compute_max_dotclk() limits the max pixel clock to 99% of max
* cdclk.)
*/
if (max_pixclk > DIV_ROUND_UP(2 * 158400 * 99, 100))
if (min_cdclk > 158400)
return 316800;
else if (max_pixclk > DIV_ROUND_UP(2 * 79200 * 99, 100))
else if (min_cdclk > 79200)
return 158400;
else
return 79200;
@ -1420,11 +1411,11 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
bxt_set_cdclk(dev_priv, &cdclk_state);
}
static int cnl_calc_cdclk(int max_pixclk)
static int cnl_calc_cdclk(int min_cdclk)
{
if (max_pixclk > 336000)
if (min_cdclk > 336000)
return 528000;
else if (max_pixclk > 168000)
else if (min_cdclk > 168000)
return 336000;
else
return 168000;
@ -1732,104 +1723,119 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->display.set_cdclk(dev_priv, cdclk_state);
}
static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
int pixel_rate)
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
if (INTEL_GEN(dev_priv) >= 10)
/*
* FIXME: Switch to DIV_ROUND_UP(pixel_rate, 2)
* once DDI clock voltage requirements are
* handled correctly.
*/
return pixel_rate;
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
* as a temporary workaround. Use a higher cdclk instead. (Note that
* intel_compute_max_dotclk() limits the max pixel clock to 99% of max
* cdclk.)
*/
return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
return DIV_ROUND_UP(pixel_rate * 100, 95);
else
return DIV_ROUND_UP(pixel_rate * 100, 90);
}
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
int min_cdclk;
if (!crtc_state->base.enable)
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
min_cdclk = DIV_ROUND_UP(min_cdclk * 100, 95);
/* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
* audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
* there may be audio corruption or screen corruption." This cdclk
* restriction for GLK is 316.8 MHz and since GLK can output two
* pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
* restriction for GLK is 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
crtc_state->lane_count == 4) {
if (IS_CANNONLAKE(dev_priv))
pixel_rate = max(316800, pixel_rate);
else if (IS_GEMINILAKE(dev_priv))
pixel_rate = max(2 * 316800, pixel_rate);
else
pixel_rate = max(432000, pixel_rate);
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
} else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
}
/* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
* The check for GLK has to be adjusted as the platform can output
* two pixels per clock.
*/
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
if (IS_GEMINILAKE(dev_priv))
pixel_rate = max(2 * 2 * 96000, pixel_rate);
else
pixel_rate = max(2 * 96000, pixel_rate);
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
return pixel_rate;
return min_cdclk;
}
/* compute the max rate for new configuration */
static int intel_max_pixel_rate(struct drm_atomic_state *state)
static int intel_compute_min_cdclk(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
struct drm_crtc_state *cstate;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
unsigned int max_pixel_rate = 0, i;
int min_cdclk, i;
enum pipe pipe;
memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
sizeof(intel_state->min_pixclk));
memcpy(intel_state->min_cdclk, dev_priv->min_cdclk,
sizeof(intel_state->min_cdclk));
for_each_new_crtc_in_state(state, crtc, cstate, i) {
int pixel_rate;
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (min_cdclk < 0)
return min_cdclk;
crtc_state = to_intel_crtc_state(cstate);
if (!crtc_state->base.enable) {
intel_state->min_pixclk[i] = 0;
continue;
}
pixel_rate = crtc_state->pixel_rate;
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
pixel_rate =
bdw_adjust_min_pipe_pixel_rate(crtc_state,
pixel_rate);
intel_state->min_pixclk[i] = pixel_rate;
intel_state->min_cdclk[i] = min_cdclk;
}
min_cdclk = 0;
for_each_pipe(dev_priv, pipe)
max_pixel_rate = max(intel_state->min_pixclk[pipe],
max_pixel_rate);
min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
return max_pixel_rate;
return min_cdclk;
}
static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
int max_pixclk = intel_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
int cdclk;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk;
cdclk = vlv_calc_cdclk(dev_priv, max_pixclk);
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@ -1847,22 +1853,18 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int max_pixclk = intel_max_pixel_rate(state);
int cdclk;
int min_cdclk, cdclk;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
/*
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
cdclk = bdw_calc_cdclk(max_pixclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
cdclk = bdw_calc_cdclk(min_cdclk);
intel_state->cdclk.logical.cdclk = cdclk;
@ -1880,10 +1882,13 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
const int max_pixclk = intel_max_pixel_rate(state);
int cdclk, vco;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
vco = intel_state->cdclk.logical.vco;
if (!vco)
@ -1893,13 +1898,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
* FIXME should also account for plane ratio
* once 64bpp pixel formats are supported.
*/
cdclk = skl_calc_cdclk(max_pixclk, vco);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
cdclk = skl_calc_cdclk(min_cdclk, vco);
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@ -1920,25 +1919,21 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
int max_pixclk = intel_max_pixel_rate(state);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
int cdclk, vco;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
if (IS_GEMINILAKE(dev_priv)) {
cdclk = glk_calc_cdclk(max_pixclk);
cdclk = glk_calc_cdclk(min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
cdclk = bxt_calc_cdclk(max_pixclk);
cdclk = bxt_calc_cdclk(min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@ -1964,20 +1959,16 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
int max_pixclk = intel_max_pixel_rate(state);
int cdclk, vco;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
int min_cdclk, cdclk, vco;
cdclk = cnl_calc_cdclk(max_pixclk);
min_cdclk = intel_compute_min_cdclk(state);
if (min_cdclk < 0)
return min_cdclk;
cdclk = cnl_calc_cdclk(min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
if (cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
intel_state->cdclk.logical.vco = vco;
intel_state->cdclk.logical.cdclk = cdclk;
@ -1999,14 +1990,21 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
if (IS_GEMINILAKE(dev_priv))
if (INTEL_GEN(dev_priv) >= 10)
/*
* FIXME: Allow '2 * max_cdclk_freq'
* once DDI clock voltage requirements are
* handled correctly.
*/
return max_cdclk_freq;
else if (IS_GEMINILAKE(dev_priv))
/*
* FIXME: Limiting to 99% as a temporary workaround. See
* glk_calc_cdclk() for details.
* intel_min_cdclk() for details.
*/
return 2 * max_cdclk_freq * 99 / 100;
else if (INTEL_INFO(dev_priv)->gen >= 9 ||
IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
return max_cdclk_freq*95/100;
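
To make the split between intel_crtc_compute_min_cdclk() and the per-platform *_calc_cdclk() helpers concrete, here is a rough worked example with made-up numbers for the Broadwell-with-IPS path; only the 95% guardband and the cdclk steps are taken from the code above, everything else is illustrative.

#include <stdio.h>

/* Same steps as bdw_calc_cdclk() in the diff. */
static int demo_bdw_calc_cdclk(int min_cdclk)
{
	if (min_cdclk > 540000)
		return 675000;
	else if (min_cdclk > 450000)
		return 540000;
	else if (min_cdclk > 337500)
		return 450000;
	else
		return 337500;
}

int main(void)
{
	int pixel_rate = 450000;			/* kHz, hypothetical mode */
	/* BDW with IPS: pixel rate must stay below 95% of cdclk. */
	int min_cdclk = (pixel_rate * 100 + 94) / 95;	/* DIV_ROUND_UP */

	printf("pixel_rate=%d kHz -> min_cdclk=%d kHz -> cdclk=%d kHz\n",
	       pixel_rate, min_cdclk, demo_bdw_calc_cdclk(min_cdclk));
	/* prints: pixel_rate=450000 kHz -> min_cdclk=473685 kHz -> cdclk=540000 kHz */
	return 0;
}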

View File

@ -143,7 +143,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
const struct intel_crtc_state *crtc_state,
int mode)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -194,28 +194,28 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
}
static void intel_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_crt_set_dpms(encoder, old_crtc_state, DRM_MODE_DPMS_OFF);
}
static void pch_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_crt(encoder, old_crtc_state, old_conn_state);
}
static void hsw_post_disable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -228,8 +228,8 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
}
static void intel_enable_crt(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_crt_set_dpms(encoder, pipe_config, DRM_MODE_DPMS_ON);
}

View File

@ -588,6 +588,67 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
}
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
return cnl_ddi_translations_hdmi_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
return cnl_ddi_translations_hdmi_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
return cnl_ddi_translations_hdmi_1_05V;
} else
MISSING_CASE(voltage);
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
return cnl_ddi_translations_dp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
return cnl_ddi_translations_dp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
return cnl_ddi_translations_dp_1_05V;
} else
MISSING_CASE(voltage);
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
return cnl_ddi_translations_edp_1_05V;
} else
MISSING_CASE(voltage);
return NULL;
} else {
return cnl_get_buf_trans_dp(dev_priv, n_entries);
}
}
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
{
int n_hdmi_entries;
@ -599,7 +660,10 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
if (IS_GEN9_LP(dev_priv))
return hdmi_level;
if (IS_GEN9_BC(dev_priv)) {
if (IS_CANNONLAKE(dev_priv)) {
cnl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = n_hdmi_entries - 1;
} else if (IS_GEN9_BC(dev_priv)) {
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
@ -688,9 +752,6 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations;
if (IS_GEN9_LP(dev_priv))
return;
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
@ -741,9 +802,6 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
enum port port = intel_ddi_get_encoder_port(encoder);
const struct ddi_buf_trans *ddi_translations_hdmi;
if (IS_GEN9_LP(dev_priv))
return;
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
if (IS_GEN9_BC(dev_priv)) {
@ -785,7 +843,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->id) {
case DPLL_ID_WRPLL1:
@ -1821,10 +1879,17 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries;
if (encoder->type == INTEL_OUTPUT_EDP)
intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
else
intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
if (IS_CANNONLAKE(dev_priv)) {
if (encoder->type == INTEL_OUTPUT_EDP)
cnl_get_buf_trans_edp(dev_priv, &n_entries);
else
cnl_get_buf_trans_dp(dev_priv, &n_entries);
} else {
if (encoder->type == INTEL_OUTPUT_EDP)
intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
else
intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
}
if (WARN_ON(n_entries < 1))
n_entries = 1;
@ -1835,90 +1900,23 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
DP_TRAIN_VOLTAGE_SWING_MASK;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
return cnl_ddi_translations_hdmi_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_95V);
return cnl_ddi_translations_hdmi_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_1_05V);
return cnl_ddi_translations_hdmi_1_05V;
}
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
return cnl_ddi_translations_dp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_95V);
return cnl_ddi_translations_dp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_1_05V);
return cnl_ddi_translations_dp_1_05V;
}
return NULL;
}
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
u32 voltage, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
return cnl_ddi_translations_edp_0_85V;
} else if (voltage == VOLTAGE_INFO_0_95V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
return cnl_ddi_translations_edp_0_95V;
} else if (voltage == VOLTAGE_INFO_1_05V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_1_05V);
return cnl_ddi_translations_edp_1_05V;
}
return NULL;
} else {
return cnl_get_buf_trans_dp(dev_priv, voltage, n_entries);
}
}
static void cnl_ddi_vswing_program(struct drm_i915_private *dev_priv,
u32 level, enum port port, int type)
{
const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val, voltage;
u32 n_entries, val;
int ln;
/*
* Values for each port type are listed in
* voltage swing programming tables.
* Vccio voltage found in PORT_COMP_DW3.
*/
voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (type == INTEL_OUTPUT_HDMI) {
ddi_translations = cnl_get_buf_trans_hdmi(dev_priv,
voltage, &n_entries);
ddi_translations = cnl_get_buf_trans_hdmi(dev_priv, &n_entries);
} else if (type == INTEL_OUTPUT_DP) {
ddi_translations = cnl_get_buf_trans_dp(dev_priv,
voltage, &n_entries);
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
} else if (type == INTEL_OUTPUT_EDP) {
ddi_translations = cnl_get_buf_trans_edp(dev_priv,
voltage, &n_entries);
ddi_translations = cnl_get_buf_trans_edp(dev_priv, &n_entries);
}
if (ddi_translations == NULL) {
MISSING_CASE(voltage);
if (WARN_ON(ddi_translations == NULL))
return;
}
if (level >= n_entries) {
DRM_DEBUG_KMS("DDI translation not found for level %d. Using %d instead.", level, n_entries - 1);
@ -2054,33 +2052,46 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
{
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
return translate_signal_level(signal_levels);
}
u32 bxt_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
enum port port = dport->port;
u32 level = intel_ddi_dp_level(intel_dp);
if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level);
else
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
return 0;
}
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
struct intel_encoder *encoder = &dport->base;
uint8_t train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
enum port port = dport->port;
uint32_t level;
level = translate_signal_level(signal_levels);
uint32_t level = intel_ddi_dp_level(intel_dp);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
else if (IS_CANNONLAKE(dev_priv)) {
cnl_ddi_vswing_sequence(encoder, level);
/* DDI_BUF_CTL bits 27:24 are reserved on CNL */
return 0;
}
skl_ddi_set_iboost(encoder, level);
return DDI_BUF_TRANS_SELECT(level);
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
struct intel_shared_dpll *pll)
const struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
@ -2129,6 +2140,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
uint32_t level = intel_ddi_dp_level(intel_dp);
WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
@ -2141,7 +2153,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
intel_prepare_dp_ddi_buffers(encoder);
if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
else
intel_prepare_dp_ddi_buffers(encoder);
intel_ddi_init_dp_buf_reg(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@ -2150,14 +2168,14 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
bool has_hdmi_sink,
bool has_infoframe,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state,
struct intel_shared_dpll *pll)
const struct intel_shared_dpll *pll)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_encoder *drm_encoder = &encoder->base;
enum port port = intel_ddi_get_encoder_port(encoder);
int level = intel_ddi_hdmi_level(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
@ -2167,23 +2185,25 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
intel_prepare_hdmi_ddi_buffers(encoder);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_vswing_sequence(encoder, level);
else
intel_prepare_hdmi_ddi_buffers(encoder);
intel_hdmi->set_infoframes(drm_encoder,
has_hdmi_sink,
crtc_state, conn_state);
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
intel_dig_port->set_infoframes(&encoder->base,
has_infoframe,
crtc_state, conn_state);
}
static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
int type = encoder->type;
@ -2197,21 +2217,20 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(encoder,
pipe_config->has_hdmi_sink,
pipe_config->has_infoframe,
pipe_config, conn_state,
pipe_config->shared_dpll);
}
}
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = NULL;
int type = intel_encoder->type;
uint32_t val;
bool wait = false;
@ -2219,7 +2238,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
/* old_crtc_state and old_conn_state are NULL when called from DP_MST */
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
@ -2238,7 +2258,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
if (intel_dp) {
if (type == INTEL_OUTPUT_HDMI) {
dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
}
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
}
@ -2263,8 +2290,8 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
@ -2296,8 +2323,8 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
@ -2328,7 +2355,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(pipe_config, conn_state);
intel_psr_enable(intel_dp);
intel_psr_enable(intel_dp, pipe_config);
intel_edp_drrs_enable(intel_dp, pipe_config);
}
@ -2337,8 +2364,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
@ -2356,14 +2383,14 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
}
}
static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
uint8_t mask = pipe_config->lane_lat_optim_mask;
@ -2435,7 +2462,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_hdmi *intel_hdmi;
struct intel_digital_port *intel_dig_port;
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
@ -2474,9 +2501,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
pipe_config->has_hdmi_sink = true;
intel_hdmi = enc_to_intel_hdmi(&encoder->base);
intel_dig_port = enc_to_dig_port(&encoder->base);
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@ -2729,6 +2756,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
intel_infoframe_init(intel_dig_port);
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
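
The reworked cnl_get_buf_trans_*() helpers above hand back both the table and its entry count, leaving the range check to the caller. A stand-alone illustration of that calling pattern follows; the types and values are made up, and only the clamp mirrors cnl_ddi_vswing_program().

#include <stdio.h>

struct demo_buf_trans { unsigned int dw2, dw4, dw5; };	/* stand-in entry */

static const struct demo_buf_trans demo_table[] = {
	{ 0x0, 0x0, 0x0 }, { 0x1, 0x1, 0x1 }, { 0x2, 0x2, 0x2 },
};

static const struct demo_buf_trans *demo_get_buf_trans(int *n_entries)
{
	*n_entries = (int)(sizeof(demo_table) / sizeof(demo_table[0]));
	return demo_table;
}

int main(void)
{
	int n_entries, level = 9;	/* deliberately out of range */
	const struct demo_buf_trans *table = demo_get_buf_trans(&n_entries);

	if (!table)			/* analogous to the WARN_ON(NULL) check */
		return 1;
	if (level >= n_entries)
		level = n_entries - 1;	/* fall back to the last entry */
	printf("programming entry %d of %d\n", level, n_entries);
	return 0;
}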

View File

@ -412,7 +412,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
else if (INTEL_INFO(dev_priv)->gen >= 9)
gen9_sseu_info_init(dev_priv);
info->has_snoop = !info->has_llc;
WARN_ON(info->has_snoop != !info->has_llc);
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));

View File

@ -3770,8 +3770,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
if (!gpu_reset_clobbers_display(dev_priv)) {
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@ -3804,15 +3804,14 @@ unlock:
clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;
crtc->base.mode = new_crtc_state->base.mode;
/*
* Update pipe size and adjust fitter if needed: the reason for this is
@ -3824,17 +3823,17 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
*/
I915_WRITE(PIPESRC(crtc->pipe),
((pipe_config->pipe_src_w - 1) << 16) |
(pipe_config->pipe_src_h - 1));
((new_crtc_state->pipe_src_w - 1) << 16) |
(new_crtc_state->pipe_src_h - 1));
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
skl_detach_scalers(crtc);
if (pipe_config->pch_pfit.enabled)
if (new_crtc_state->pch_pfit.enabled)
skylake_pfit_enable(crtc);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (pipe_config->pch_pfit.enabled)
if (new_crtc_state->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(crtc, true);
@ -5118,7 +5117,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
crtc);
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
drm_atomic_get_existing_plane_state(old_state, primary);
@ -5130,7 +5130,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state),
to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@ -5159,7 +5160,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (old_pri_state) {
struct intel_plane_state *primary_state =
to_intel_plane_state(primary->state);
intel_atomic_get_new_plane_state(old_intel_state,
to_intel_plane(primary));
struct intel_plane_state *old_primary_state =
to_intel_plane_state(old_pri_state);
@ -6038,7 +6040,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
dev_priv->min_pixclk[intel_crtc->pipe] = 0;
dev_priv->min_cdclk[intel_crtc->pipe] = 0;
}
/*
@ -6283,6 +6285,9 @@ retry:
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
if (pipe_config->ips_force_disable)
return false;
if (pipe_config->pipe_bpp > 24)
return false;
@ -9039,7 +9044,7 @@ static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 2);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
return;
@ -9753,7 +9758,7 @@ static void i9xx_disable_cursor(struct intel_plane *plane,
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
static const struct drm_display_mode load_detect_mode = {
DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
@ -9788,7 +9793,7 @@ intel_framebuffer_pitch_for_width(int width, int bpp)
}
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
{
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
return PAGE_ALIGN(pitch * mode->vdisplay);
@ -9796,7 +9801,7 @@ intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
int depth, int bpp)
{
struct drm_framebuffer *fb;
@ -9823,7 +9828,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_i915_private *dev_priv = to_i915(dev);
@ -9856,7 +9861,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_framebuffer *fb,
int x, int y)
{
@ -9890,7 +9895,7 @@ static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
}
int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx)
{
@ -10337,7 +10342,7 @@ static bool intel_wm_need_update(struct drm_plane *plane,
return false;
}
static bool needs_scaling(struct intel_plane_state *state)
static bool needs_scaling(const struct intel_plane_state *state)
{
int src_w = drm_rect_width(&state->base.src) >> 16;
int src_h = drm_rect_height(&state->base.src) >> 16;
@ -10347,7 +10352,9 @@ static bool needs_scaling(struct intel_plane_state *state)
return (src_w != dst_w || src_h != dst_h);
}
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state)
{
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
@ -10356,10 +10363,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_plane *plane = to_intel_plane(plane_state->plane);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool was_crtc_enabled = old_crtc_state->base.active;
bool is_crtc_enabled = crtc_state->active;
bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;
@ -10850,7 +10855,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
struct intel_dpll_hw_state dpll_hw_state;
struct intel_shared_dpll *shared_dpll;
struct intel_crtc_wm_state wm_state;
bool force_thru;
bool force_thru, ips_force_disable;
/* FIXME: before the switch to atomic started, a new pipe_config was
* kzalloc'd. Code that depends on any field being zero should be
@ -10861,6 +10866,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
force_thru = crtc_state->pch_pfit.force_thru;
ips_force_disable = crtc_state->ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
wm_state = crtc_state->wm;
@ -10874,6 +10880,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->pch_pfit.force_thru = force_thru;
crtc_state->ips_force_disable = ips_force_disable;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
crtc_state->wm = wm_state;
@ -12206,7 +12213,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
unsigned int cmask = drm_crtc_mask(crtc);
intel_crtc = to_intel_crtc(crtc);
cstate = to_intel_crtc_state(crtc->state);
cstate = to_intel_crtc_state(new_crtc_state);
pipe = intel_crtc->pipe;
if (updated & cmask || !cstate->base.active)
@ -12334,7 +12341,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
if (!crtc->state->active) {
if (!new_crtc_state->active) {
/*
* Make sure we don't call initial_watermarks
* for ILK-style watermark updates.
@ -12343,7 +12350,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.initial_watermarks(intel_state,
to_intel_crtc_state(crtc->state));
to_intel_crtc_state(new_crtc_state));
}
}
}
@ -12556,8 +12563,8 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_atomic_track_fbs(state);
if (intel_state->modeset) {
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
sizeof(intel_state->min_pixclk));
memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
sizeof(intel_state->min_cdclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
dev_priv->cdclk.logical = intel_state->cdclk.logical;
dev_priv->cdclk.actual = intel_state->cdclk.actual;
@ -12586,6 +12593,58 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_crc_source = intel_crtc_set_crc_source,
};
struct wait_rps_boost {
struct wait_queue_entry wait;
struct drm_crtc *crtc;
struct drm_i915_gem_request *request;
};
static int do_rps_boost(struct wait_queue_entry *_wait,
unsigned mode, int sync, void *key)
{
struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
struct drm_i915_gem_request *rq = wait->request;
gen6_rps_boost(rq, NULL);
i915_gem_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
list_del(&wait->wait.entry);
kfree(wait);
return 1;
}
static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
struct dma_fence *fence)
{
struct wait_rps_boost *wait;
if (!dma_fence_is_i915(fence))
return;
if (INTEL_GEN(to_i915(crtc->dev)) < 6)
return;
if (drm_crtc_vblank_get(crtc))
return;
wait = kmalloc(sizeof(*wait), GFP_KERNEL);
if (!wait) {
drm_crtc_vblank_put(crtc);
return;
}
wait->request = to_request(dma_fence_get(fence));
wait->crtc = crtc;
wait->wait.func = do_rps_boost;
wait->wait.flags = 0;
add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
}
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @plane: drm plane to prepare for
@ -12683,12 +12742,22 @@ intel_prepare_plane_fb(struct drm_plane *plane,
return ret;
if (!new_state->fence) { /* implicit fencing */
struct dma_fence *fence;
ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
obj->resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
return ret;
fence = reservation_object_get_excl_rcu(obj->resv);
if (fence) {
add_rps_boost_after_vblank(new_state->crtc, fence);
dma_fence_put(fence);
}
} else {
add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
}
return 0;
@ -12805,29 +12874,29 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(crtc->state);
struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
struct intel_crtc_state *intel_cstate =
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
bool modeset = needs_modeset(&intel_cstate->base);
if (!modeset &&
(intel_cstate->base.color_mgmt_changed ||
intel_cstate->update_pipe)) {
intel_color_set_csc(crtc->state);
intel_color_load_luts(crtc->state);
intel_color_set_csc(&intel_cstate->base);
intel_color_load_luts(&intel_cstate->base);
}
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
intel_pipe_update_start(intel_cstate);
if (modeset)
goto out;
if (intel_cstate->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_cstate);
intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
skl_detach_scalers(intel_crtc);
@ -12841,8 +12910,12 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
intel_pipe_update_end(intel_crtc);
intel_pipe_update_end(new_crtc_state);
}
/**
@ -13029,6 +13102,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_h = crtc_h;
ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
to_intel_plane_state(plane->state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@ -13600,7 +13675,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
@ -13643,14 +13718,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claim are DP or eDP.
*/
has_edp = intel_dp_is_edp(dev_priv, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_edp(dev_priv, PORT_C);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
@ -14967,7 +15042,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int pixclk = 0;
int min_cdclk = 0;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc_state->base.active) {
@ -14988,22 +15063,18 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
intel_crtc_compute_pixel_rate(crtc_state);
if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pixclk = crtc_state->pixel_rate;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixclk = DIV_ROUND_UP(pixclk * 100, 95);
if (dev_priv->display.modeset_calc_cdclk) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
if (WARN_ON(min_cdclk < 0))
min_cdclk = 0;
}
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
update_scanline_offset(crtc);
}
dev_priv->min_pixclk[crtc->pipe] = pixclk;
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
intel_pipe_config_sanity_check(dev_priv, crtc_state);
}
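/*
 * Worked example (illustrative numbers; assumes intel_crtc_compute_min_cdclk()
 * now carries the rule the removed open-coded path applied): on Broadwell with
 * IPS enabled the pixel rate must not exceed 95% of cdclk, so a pixel rate of
 * 450000 kHz translates into a minimum cdclk of
 * DIV_ROUND_UP(450000 * 100, 95) = 473685 kHz, and that per-pipe minimum is
 * what lands in dev_priv->min_cdclk[crtc->pipe] instead of the raw pixel
 * clock that min_pixclk[] used to hold.
 */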

View File

@ -103,13 +103,13 @@ static const int cnl_rates[] = { 162000, 216000, 270000,
static const int default_rates[] = { 162000, 270000, 540000 };
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
static bool is_edp(struct intel_dp *intel_dp)
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@ -388,7 +388,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (is_edp(intel_dp) && fixed_mode) {
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@ -597,7 +597,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
WARN_ON(!is_edp(intel_dp));
WARN_ON(!intel_dp_is_edp(intel_dp));
WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
intel_dp->active_pipe != intel_dp->pps_pipe);
@ -644,7 +644,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
WARN_ON(!is_edp(intel_dp));
WARN_ON(!intel_dp_is_edp(intel_dp));
/*
* TODO: BXT has 2 PPS instances. The correct port->PPS instance
@ -847,7 +847,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp) || code != SYS_RESTART)
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
pps_lock(intel_dp);
@ -907,7 +907,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
@ -1681,7 +1681,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
else
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
struct drm_display_mode *panel_mode =
intel_connector->panel.alt_fixed_mode;
struct drm_display_mode *req_mode = &pipe_config->base.mode;
@ -1736,7 +1736,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
if (is_edp(intel_dp)) {
if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that don't have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
@ -1829,7 +1829,7 @@ found:
* DPLL0 VCO may need to be adjusted to get the correct
* clock for eDP. This will affect cdclk as well.
*/
if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
int vco;
switch (pipe_config->port_clock / 2) {
@ -1861,7 +1861,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
}
static void intel_dp_prepare(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -2069,7 +2069,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return false;
cancel_delayed_work(&intel_dp->panel_vdd_work);
@ -2119,7 +2119,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
bool vdd;
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@ -2203,7 +2203,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
lockdep_assert_held(&dev_priv->pps_mutex);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
@ -2226,7 +2226,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
@ -2267,7 +2267,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@ -2285,7 +2285,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
@ -2316,7 +2316,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@ -2360,7 +2360,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@ -2377,7 +2377,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@ -2401,7 +2401,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@ -2461,7 +2461,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@ -2666,7 +2666,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
@ -2688,8 +2688,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -2698,7 +2698,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
intel_audio_codec_disable(encoder);
if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
intel_psr_disable(intel_dp);
intel_psr_disable(intel_dp, old_crtc_state);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@ -2713,8 +2713,8 @@ static void intel_disable_dp(struct intel_encoder *encoder,
}
static void ilk_post_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@ -2727,8 +2727,8 @@ static void ilk_post_disable_dp(struct intel_encoder *encoder,
}
static void vlv_post_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
@ -2736,8 +2736,8 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder,
}
static void chv_post_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@ -2842,7 +2842,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
struct intel_crtc_state *old_crtc_state)
const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@ -2866,8 +2866,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
}
static void intel_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
@ -2914,26 +2914,26 @@ static void intel_enable_dp(struct intel_encoder *encoder,
}
static void g4x_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
static void vlv_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(pipe_config, conn_state);
intel_psr_enable(intel_dp);
intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
@ -3040,7 +3040,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->active_pipe = crtc->pipe;
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
/* now it's all ours */
@ -3055,8 +3055,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
}
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
vlv_phy_pre_encoder_enable(encoder);
@ -3064,8 +3064,8 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder,
}
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@ -3073,8 +3073,8 @@ static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_pre_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
chv_phy_pre_encoder_enable(encoder);
@ -3085,8 +3085,8 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder,
}
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_dp_prepare(encoder, pipe_config);
@ -3094,8 +3094,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
chv_phy_post_pll_disable(encoder);
}
@ -3506,13 +3506,11 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
uint32_t signal_levels, mask = 0;
uint8_t train_set = intel_dp->train_set[0];
if (HAS_DDI(dev_priv)) {
if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv)) {
signal_levels = bxt_signal_levels(intel_dp);
} else if (HAS_DDI(dev_priv)) {
signal_levels = ddi_signal_levels(intel_dp);
if (IS_GEN9_LP(dev_priv) || IS_CANNONLAKE(dev_priv))
signal_levels = 0;
else
mask = DDI_BUF_EMP_MASK;
mask = DDI_BUF_EMP_MASK;
} else if (IS_CHERRYVIEW(dev_priv)) {
signal_levels = chv_signal_levels(intel_dp);
} else if (IS_VALLEYVIEW(dev_priv)) {
@ -3784,7 +3782,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
/* Don't clobber cached eDP rates. */
if (!is_edp(intel_dp)) {
if (!intel_dp_is_edp(intel_dp)) {
intel_dp_set_sink_rates(intel_dp);
intel_dp_set_common_rates(intel_dp);
}
@ -3806,7 +3804,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
if (!is_edp(intel_dp) && !intel_dp->sink_count)
if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
return false;
if (!drm_dp_is_branch(intel_dp->dpcd))
@ -4396,7 +4394,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
if (is_edp(intel_dp))
if (intel_dp_is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
@ -4712,7 +4710,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
@ -4792,7 +4790,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
if (is_edp(intel_dp) || intel_connector->detect_edid)
if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@ -4876,7 +4874,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
if (is_edp(intel_attached_dp(connector)) &&
if (intel_dp_is_edp(intel_attached_dp(connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@ -4927,8 +4925,10 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
/* Can't call is_edp() since the encoder may have been destroyed
* already. */
/*
* Can't call intel_dp_is_edp() since the encoder may have been
* destroyed already.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
@ -4942,7 +4942,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (is_edp(intel_dp)) {
if (intel_dp_is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
@ -4968,7 +4968,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return;
/*
@ -5036,7 +5036,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->active_pipe = vlv_active_pipe(intel_dp);
if (is_edp(intel_dp)) {
if (intel_dp_is_edp(intel_dp)) {
/* Reinit the power sequencer, in case BIOS did something with it. */
intel_dp_pps_init(encoder->dev, intel_dp);
intel_edp_panel_vdd_sanitize(intel_dp);
@ -5137,7 +5137,7 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
/*
* eDP not supported on g4x. so bail out early just
@ -5160,7 +5160,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
if (is_edp(intel_dp)) {
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
@ -5448,7 +5448,7 @@ static void intel_dp_pps_init(struct drm_device *dev,
* The caller of this function needs to take a lock on dev_priv->drrs.
*/
static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state,
const struct intel_crtc_state *crtc_state,
int refresh_rate)
{
struct intel_encoder *encoder;
@ -5545,7 +5545,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
* Initializes frontbuffer_bits and drrs.dp
*/
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
const struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@ -5576,7 +5576,7 @@ unlock:
*
*/
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
struct intel_crtc_state *old_crtc_state)
const struct intel_crtc_state *old_crtc_state)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
@ -5826,7 +5826,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
if (!is_edp(intel_dp))
if (!intel_dp_is_edp(intel_dp))
return true;
/*
@ -6042,7 +6042,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
if (intel_dp_is_edp(dev_priv, port))
if (intel_dp_is_port_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@ -6060,7 +6060,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
is_edp(intel_dp) && port != PORT_B && port != PORT_C))
intel_dp_is_edp(intel_dp) &&
port != PORT_B && port != PORT_C))
return false;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
@ -6088,7 +6089,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_connector->get_hw_state = intel_connector_get_hw_state;
/* init MST on ports that can support it */
if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@ -6186,6 +6187,9 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;

View File

@ -123,8 +123,8 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector,
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@ -146,8 +146,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@ -176,8 +176,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
@ -219,8 +219,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;

View File

@ -220,23 +220,23 @@ struct intel_encoder {
struct intel_crtc_state *,
struct drm_connector_state *);
void (*pre_pll_enable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*pre_enable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*enable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*disable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*post_disable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
void (*post_pll_disable)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
const struct intel_crtc_state *,
const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@ -384,7 +384,8 @@ struct intel_atomic_state {
unsigned int active_pipe_changes;
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@ -753,6 +754,7 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
bool ips_force_disable;
bool enable_fbc;
@ -909,16 +911,6 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
void (*write_infoframe)(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder;
@ -1069,6 +1061,17 @@ struct intel_digital_port {
bool release_cl2_override;
uint8_t max_lanes;
enum intel_display_power_domain ddi_io_power_domain;
void (*write_infoframe)(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
enum hdmi_infoframe_type type,
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
struct intel_dp_mst_encoder {
@ -1189,6 +1192,30 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
static inline struct intel_plane_state *
intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
return to_intel_plane_state(drm_atomic_get_new_plane_state(&state->base,
&plane->base));
}
static inline struct intel_crtc_state *
intel_atomic_get_old_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
return to_intel_crtc_state(drm_atomic_get_old_crtc_state(&state->base,
&crtc->base));
}
static inline struct intel_crtc_state *
intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
&crtc->base));
}
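/*
 * Minimal usage sketch for the new accessors (illustrative, not part of this
 * diff): given an intel_atomic_state and an intel_crtc they wrap the core
 * drm_atomic_get_{old,new}_crtc_state() lookups and cast to the driver types:
 *
 *	struct intel_crtc_state *old_cs =
 *		intel_atomic_get_old_crtc_state(state, crtc);
 *	struct intel_crtc_state *new_cs =
 *		intel_atomic_get_new_crtc_state(state, crtc);
 *
 * which is exactly the pattern the intel_display.c hunks earlier in this
 * series switch to.
 */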
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@ -1205,11 +1232,8 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
@ -1246,8 +1270,8 @@ void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@ -1272,6 +1296,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
@ -1290,6 +1315,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
/* intel_cdclk.c */
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
void skl_init_cdclk(struct drm_i915_private *dev_priv);
void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
void cnl_init_cdclk(struct drm_i915_private *dev_priv);
@ -1377,7 +1403,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dport,
unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
@ -1401,7 +1427,9 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val);
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct drm_plane_state *plane_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
@ -1499,7 +1527,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
@ -1518,9 +1547,9 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
const struct intel_crtc_state *crtc_state);
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
@ -1648,6 +1677,7 @@ void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
bool high_tmds_clock_ratio,
bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
/* intel_lvds.c */
@ -1719,8 +1749,10 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
/* intel_psr.c */
void intel_psr_enable(struct intel_dp *intel_dp);
void intel_psr_disable(struct intel_dp *intel_dp);
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_psr_flush(struct drm_i915_private *dev_priv,
@ -1844,7 +1876,6 @@ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps);
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
@ -1884,8 +1915,8 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(struct intel_crtc *crtc);
void intel_pipe_update_end(struct intel_crtc *crtc);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@ -1957,7 +1988,9 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
/* intel_color.c */

View File

@ -731,7 +731,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config);
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
@ -783,8 +783,8 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
*/
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -878,8 +878,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* the pre_enable hook.
*/
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
}
@ -889,8 +889,8 @@ static void intel_dsi_enable_nop(struct intel_encoder *encoder,
* the post_disable hook.
*/
static void intel_dsi_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@ -925,8 +925,8 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
}
static void intel_dsi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -1066,7 +1066,7 @@ out_put_power:
}
static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1370,7 +1370,7 @@ static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt)
}
static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;

View File

@ -175,8 +175,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_dvo(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@ -189,8 +189,8 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
}
static void intel_enable_dvo(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@ -258,8 +258,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
}
static void intel_dvo_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

View File

@ -1065,6 +1065,51 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
return 0;
}
static int cnl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
int ret;
/* WaDisableI2mCycleOnWRPort: cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT(GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
/* WaInPlaceDecompressionHang:cnl */
WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaPushConstantDereferenceHoldDisable:cnl */
WA_SET_BIT(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
/* FtrEnableFastAnisoL1BankingFix: cnl */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
/* WaEnablePreemptionGranularityControlByUMD:cnl */
ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
if (ret)
return ret;
return 0;
}
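/*
 * Sketch of what a WA_SET_BIT_MASKED() entry above amounts to (illustrative,
 * assuming the usual i915 masked-register convention where the upper 16 bits
 * of the write select which of the lower 16 bits take effect):
 *
 *	(THROTTLE_12_5 << 16) | THROTTLE_12_5   written to GEN8_ROW_CHICKEN
 *
 * i.e. only the named bits are updated and the remaining bits keep their
 * current value, whereas WA_SET_BIT() does an explicit read, OR, write; the
 * WA_* helpers queue these values into the driver's workaround list rather
 * than poking the register on the spot.
 */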
static int kbl_init_workarounds(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@ -1185,6 +1230,8 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
err = glk_init_workarounds(engine);
else if (IS_COFFEELAKE(dev_priv))
err = cfl_init_workarounds(engine);
else if (IS_CANNONLAKE(dev_priv))
err = cnl_init_workarounds(engine);
else
err = 0;
if (err)
@ -1335,6 +1382,21 @@ void intel_engines_mark_idle(struct drm_i915_private *i915)
}
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
switch (INTEL_GEN(engine->i915)) {
case 2:
return false; /* uses physical not virtual addresses */
case 3:
/* maybe only uses physical not virtual addresses */
return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
case 6:
return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
default:
return true;
}
}
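/*
 * Illustrative usage (an assumption, not taken from this diff): callers that
 * want to emit a dword store from the command stream, e.g. for a breadcrumb
 * or a selftest, can gate the emission on this helper; gen2 and i915g/i915gm
 * only take physical addresses for the store and the gen6 video engine is
 * flagged as unreliable, so those all report false.
 */
	if (!intel_engine_can_store_dword(engine))
		return 0; /* quietly skip the dword store on these engines */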
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#endif

View File

@ -291,6 +291,19 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
if (i915_gem_object_get_tiling(params->vma->obj) !=
I915_TILING_X)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
I915_WRITE(CHICKEN_MISC_4, val);
}
dpfc_ctl = 0;
if (IS_IVYBRIDGE(dev_priv))
dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
@ -881,6 +894,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
32 * fbc->threshold) * 8;
}
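/*
 * Worked example for the Display WA #0529 stride override (illustrative
 * numbers): with a 3840 pixel wide plane source and fbc->threshold == 1,
 * gen9_wa_cfb_stride = DIV_ROUND_UP(3840, 32 * 1) * 8 = 120 * 8 = 960. The
 * activation path above then writes that value, together with
 * FBC_STRIDE_OVERRIDE, into CHICKEN_MISC_4 whenever the framebuffer is not
 * X-tiled.
 */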
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,

View File

@ -206,6 +206,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@ -269,6 +270,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
fb->width, fb->height, i915_ggtt_offset(vma));
ifbdev->vma = vma;
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@ -276,6 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma);
out_unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
return ret;
}

View File

@ -434,7 +434,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
uint8_t buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@ -450,7 +450,7 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
buffer[3] = 0;
len++;
intel_hdmi->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@ -945,6 +945,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@ -965,7 +966,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@ -991,8 +992,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
@ -1003,8 +1004,8 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
}
static void g4x_enable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1025,8 +1026,8 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
}
static void ibx_enable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1075,8 +1076,8 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
}
static void cpt_enable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1130,18 +1131,20 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
}
static void vlv_enable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
}
static void intel_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
@ -1184,14 +1187,15 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
intel_hdmi->set_infoframes(&encoder->base, false, old_crtc_state, old_conn_state);
intel_dig_port->set_infoframes(&encoder->base, false,
old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
@ -1200,16 +1204,16 @@ static void g4x_disable_hdmi(struct intel_encoder *encoder,
}
static void pch_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
}
static void pch_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_hdmi(encoder, old_crtc_state, old_conn_state);
}
@ -1314,7 +1318,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
return status;
}
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
static bool hdmi_12bpc_possible(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->base.crtc->dev);
@ -1642,24 +1646,24 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
}
static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(&encoder->base);
intel_hdmi_prepare(encoder, pipe_config);
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
pipe_config, conn_state);
intel_dig_port->set_infoframes(&encoder->base,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1669,9 +1673,9 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
pipe_config, conn_state);
dport->set_infoframes(&encoder->base,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@ -1679,8 +1683,8 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@ -1688,8 +1692,8 @@ static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
intel_hdmi_prepare(encoder, pipe_config);
@ -1697,23 +1701,23 @@ static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder,
}
static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
chv_phy_post_pll_disable(encoder);
}
static void vlv_hdmi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
/* Reset lanes to avoid HDMI flicker (VLV w/a) */
vlv_phy_reset_lanes(encoder);
}
static void chv_hdmi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1727,11 +1731,10 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder,
}
static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dport->hdmi;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -1741,9 +1744,9 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
intel_hdmi->set_infoframes(&encoder->base,
pipe_config->has_hdmi_sink,
pipe_config, conn_state);
dport->set_infoframes(&encoder->base,
pipe_config->has_infoframe,
pipe_config, conn_state);
g4x_enable_hdmi(encoder, pipe_config, conn_state);
@ -1958,6 +1961,34 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
{
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dig_port->write_infoframe = vlv_write_infoframe;
intel_dig_port->set_infoframes = vlv_set_infoframes;
intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev_priv)) {
intel_dig_port->write_infoframe = g4x_write_infoframe;
intel_dig_port->set_infoframes = g4x_set_infoframes;
intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev_priv)) {
intel_dig_port->write_infoframe = hsw_write_infoframe;
intel_dig_port->set_infoframes = hsw_set_infoframes;
intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_dig_port->write_infoframe = cpt_write_infoframe;
intel_dig_port->set_infoframes = cpt_set_infoframes;
intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
}
}
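/*
 * Illustrative sketch (not verbatim from this series): with the vfuncs moved
 * from intel_hdmi to intel_digital_port, any encoder that owns a digital port
 * can drive the infoframe hardware directly, e.g.
 */
	struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);

	dig_port->set_infoframes(&encoder->base, crtc_state->has_infoframe,
				 crtc_state, conn_state);
/*
 * which is also why the DP init path in this series calls
 * intel_infoframe_init() for every port other than PORT_A.
 */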
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
@ -1993,28 +2024,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
return;
intel_encoder->hpd_pin = intel_hpd_pin(port);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
} else if (IS_G4X(dev_priv)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev_priv)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
} else if (HAS_PCH_IBX(dev_priv)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
} else {
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
}
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
@ -2113,5 +2122,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = 4;
intel_infoframe_init(intel_dig_port);
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}

View File

@ -1175,6 +1175,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
case 10:
return 0;
case 9:
wa_bb_fn[0] = gen9_init_indirectctx_bb;
wa_bb_fn[1] = gen9_init_perctx_bb;

View File

@ -229,8 +229,8 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -306,8 +306,8 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* Sets the power state for the panel.
*/
static void intel_enable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
@ -324,8 +324,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
}
static void intel_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@ -339,8 +339,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
@ -349,15 +349,15 @@ static void gmch_disable_lvds(struct intel_encoder *encoder,
}
static void pch_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_panel_disable_backlight(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_lvds(encoder, old_crtc_state, old_conn_state);
}

View File

@ -506,8 +506,8 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
@ -533,10 +533,24 @@ retry:
goto put_state;
}
pipe_config->pch_pfit.force_thru = enable;
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
pipe_config->pch_pfit.enabled != enable)
pipe_config->base.connectors_changed = true;
if (HAS_IPS(dev_priv)) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
pipe_config->ips_force_disable = enable;
if (pipe_config->ips_enabled == enable)
pipe_config->base.connectors_changed = true;
}
if (IS_HASWELL(dev_priv)) {
pipe_config->pch_pfit.force_thru = enable;
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
pipe_config->pch_pfit.enabled != enable)
pipe_config->base.connectors_changed = true;
}
ret = drm_atomic_commit(state);
@ -570,8 +584,9 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_PF:
if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, true);
if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, true);
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
@ -606,7 +621,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source source)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@ -643,14 +657,6 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
goto out;
}
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(crtc);
spin_lock_irq(&pipe_crc->lock);
kfree(pipe_crc->entries);
pipe_crc->entries = entries;
@ -691,10 +697,9 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
g4x_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, pipe);
else if (IS_HASWELL(dev_priv) && pipe == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(crtc);
else if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, false);
}
ret = 0;
@ -914,7 +919,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
u32 val = 0; /* shut up gcc */
@ -935,16 +939,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
if (ret != 0)
goto out;
if (source) {
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
* user space can't make reliable use of the CRCs, so let's just
* completely disable it.
*/
hsw_disable_ips(intel_crtc);
}
I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
POSTING_READ(PIPE_CRC_CTL(crtc->index));
@ -953,10 +947,9 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
hsw_enable_ips(intel_crtc);
else if ((IS_HASWELL(dev_priv) ||
IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
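For reference, a condensed sketch of the atomic-commit pattern that hsw_pipe_A_crc_wa() relies on in the hunks above: duplicate the CRTC state, flip the workaround knob, force the state to be recomputed, and commit. Only the fields visible in the hunk come from this change; the helper name is illustrative and the locking/backoff boilerplate is an elided assumption.

static int example_force_crtc_recompute(struct intel_crtc *crtc, bool enable)
{
	struct drm_atomic_state *state;
	struct intel_crtc_state *pipe_config;
	int ret;

	state = drm_atomic_state_alloc(crtc->base.dev);
	if (!state)
		return -ENOMEM;

	/* Locking/backoff handling (drm_modeset_lock etc.) elided. */

	pipe_config = intel_atomic_get_crtc_state(state, crtc);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto put_state;
	}

	/* Toggle the workaround knob in the duplicated state... */
	pipe_config->ips_force_disable = enable;

	/* ...and make sure the commit actually recomputes the state even
	 * if nothing else changed. */
	if (pipe_config->ips_enabled == enable)
		pipe_config->base.connectors_changed = true;

	ret = drm_atomic_commit(state);

put_state:
	drm_atomic_state_put(state);
	return ret;
}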

View File

@ -125,6 +125,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 val;
gen9_init_clock_gating(dev_priv);
/*
@ -144,6 +145,11 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CHICKEN_MISC_2, val);
}
/* Display WA #1133: WaFbcSkipSegments:glk */
val = I915_READ(ILK_DPFC_CHICKEN);
val &= ~GLK_SKIP_SEG_COUNT_MASK;
val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
I915_WRITE(ILK_DPFC_CHICKEN, val);
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@ -1322,21 +1328,21 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
const struct g4x_pipe_wm *raw;
struct intel_plane_state *plane_state;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int i, level;
unsigned int dirty = 0;
for_each_intel_plane_in_state(state, plane, plane_state, i) {
const struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
if (plane_state->base.crtc != &crtc->base &&
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
if (g4x_raw_plane_wm_compute(crtc_state, plane_state))
if (g4x_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@ -1831,21 +1837,21 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
int num_active_planes = hweight32(crtc_state->active_planes &
~BIT(PLANE_CURSOR));
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
struct intel_plane_state *plane_state;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
enum plane_id plane_id;
int level, ret, i;
unsigned int dirty = 0;
for_each_intel_plane_in_state(state, plane, plane_state, i) {
const struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
if (plane_state->base.crtc != &crtc->base &&
for_each_oldnew_intel_plane_in_state(state, plane,
old_plane_state,
new_plane_state, i) {
if (new_plane_state->base.crtc != &crtc->base &&
old_plane_state->base.crtc != &crtc->base)
continue;
if (vlv_raw_plane_wm_compute(crtc_state, plane_state))
if (vlv_raw_plane_wm_compute(crtc_state, new_plane_state))
dirty |= BIT(plane->id);
}
@ -1864,7 +1870,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
/* cursor changes don't warrant a FIFO recompute */
if (dirty & ~BIT(PLANE_CURSOR)) {
const struct intel_crtc_state *old_crtc_state =
to_intel_crtc_state(crtc->base.state);
intel_atomic_get_old_crtc_state(state, crtc);
const struct vlv_fifo_state *old_fifo_state =
&old_crtc_state->wm.vlv.fifo_state;
@ -6169,6 +6175,7 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
struct intel_rps_client *rps)
{
struct drm_i915_private *i915 = rq->i915;
unsigned long flags;
bool boost;
/* This is intentionally racy! We peek at the state here, then
@ -6178,13 +6185,13 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
return;
boost = false;
spin_lock_irq(&rq->lock);
spin_lock_irqsave(&rq->lock, flags);
if (!rq->waitboost && !i915_gem_request_completed(rq)) {
atomic_inc(&i915->rps.num_waiters);
rq->waitboost = true;
boost = true;
}
spin_unlock_irq(&rq->lock);
spin_unlock_irqrestore(&rq->lock, flags);
if (!boost)
return;
@ -7980,7 +7987,7 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
static void ironlake_init_clock_gating(struct drm_i915_private *dev_priv)
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
@ -8263,7 +8270,56 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (!HAS_PCH_CNP(dev_priv))
return;
/* Wa #1181 */
I915_WRITE(SOUTH_DSPCLK_GATE_D, CNP_PWM_CGE_GATING_DISABLE);
}
static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
{
u32 val;
cnp_init_clock_gating(dev_priv);
/* This is not a Wa. Enable for better image quality */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
/* WaEnableChickenDCPR:cnl */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/* WaFbcWakeMemOn:cnl */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_MEMORY_WAKE);
/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
SARBUNIT_CLKGATE_DIS);
/* Display WA #1133: WaFbcSkipSegments:cnl */
val = I915_READ(ILK_DPFC_CHICKEN);
val &= ~GLK_SKIP_SEG_COUNT_MASK;
val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
I915_WRITE(ILK_DPFC_CHICKEN, val);
}
static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
{
cnp_init_clock_gating(dev_priv);
gen9_init_clock_gating(dev_priv);
/* WaFbcNukeOnHostModify:cfl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@ -8277,12 +8333,12 @@ static void kabylake_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
/* WaFbcNukeOnHostModify:kbl,cfl */
/* WaFbcNukeOnHostModify:kbl */
I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
{
gen9_init_clock_gating(dev_priv);
@ -8295,7 +8351,7 @@ static void skylake_init_clock_gating(struct drm_i915_private *dev_priv)
ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@ -8353,7 +8409,7 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
ilk_init_lp_watermarks(dev_priv);
@ -8407,7 +8463,7 @@ static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
lpt_init_clock_gating(dev_priv);
}
static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
uint32_t snpcr;
@ -8504,7 +8560,7 @@ static void ivybridge_init_clock_gating(struct drm_i915_private *dev_priv)
gen6_check_mch_setup(dev_priv);
}
static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaDisableEarlyCull:vlv */
I915_WRITE(_3D_CHICKEN3,
@ -8584,7 +8640,7 @@ static void valleyview_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_i915_private *dev_priv)
static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
{
/* WaVSRefCountFullforceMissDisable:chv */
/* WaDSRefCountFullforceMissDisable:chv */
@ -8644,7 +8700,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
g4x_disable_trickle_feed(dev_priv);
}
static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
@ -8658,7 +8714,7 @@ static void crestline_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_i915_private *dev_priv)
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
{
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
@ -8743,34 +8799,38 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
else if (IS_COFFEELAKE(dev_priv))
dev_priv->display.init_clock_gating = cfl_init_clock_gating;
else if (IS_SKYLAKE(dev_priv))
dev_priv->display.init_clock_gating = skl_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kbl_init_clock_gating;
else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
else if (IS_GEMINILAKE(dev_priv))
dev_priv->display.init_clock_gating = glk_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
dev_priv->display.init_clock_gating = bdw_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
dev_priv->display.init_clock_gating = chv_init_clock_gating;
else if (IS_HASWELL(dev_priv))
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
dev_priv->display.init_clock_gating = hsw_init_clock_gating;
else if (IS_IVYBRIDGE(dev_priv))
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
else if (IS_GEN6(dev_priv))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
else if (IS_GEN5(dev_priv))
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
else if (IS_I965GM(dev_priv))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
else if (IS_GEN3(dev_priv))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
@ -9132,43 +9192,6 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
struct request_boost {
struct work_struct work;
struct drm_i915_gem_request *req;
};
static void __intel_rps_boost_work(struct work_struct *work)
{
struct request_boost *boost = container_of(work, struct request_boost, work);
struct drm_i915_gem_request *req = boost->req;
if (!i915_gem_request_completed(req))
gen6_rps_boost(req, NULL);
i915_gem_request_put(req);
kfree(boost);
}
void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
struct request_boost *boost;
if (req == NULL || INTEL_GEN(req->i915) < 6)
return;
if (i915_gem_request_completed(req))
return;
boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
if (boost == NULL)
return;
boost->req = i915_gem_request_get(req);
INIT_WORK(&boost->work, __intel_rps_boost_work);
queue_work(req->i915->wq, &boost->work);
}
void intel_pm_setup(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->rps.hw_lock);

View File

@ -103,28 +103,26 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
static void vlv_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t val;
/* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
val = I915_READ(VLV_VSCSDP(pipe));
val = I915_READ(VLV_VSCSDP(crtc->pipe));
val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
I915_WRITE(VLV_VSCSDP(pipe), val);
I915_WRITE(VLV_VSCSDP(crtc->pipe), val);
}
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct edp_vsc_psr psr_vsc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct edp_vsc_psr psr_vsc;
/* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
memset(&psr_vsc, 0, sizeof(psr_vsc));
@ -145,7 +143,8 @@ static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
static void hsw_psr_setup_vsc(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct edp_vsc_psr psr_vsc;
@ -233,16 +232,15 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
I915_WRITE(aux_ctl_reg, aux_ctl);
}
static void vlv_psr_enable_source(struct intel_dp *intel_dp)
static void vlv_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc = dig_port->base.base.crtc;
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
/* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
I915_WRITE(VLV_PSRCTL(pipe),
I915_WRITE(VLV_PSRCTL(crtc->pipe),
VLV_EDP_PSR_MODE_SW_TIMER |
VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
VLV_EDP_PSR_ENABLE);
@ -485,16 +483,17 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
* @crtc_state: new CRTC state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
void intel_psr_enable(struct intel_dp *intel_dp)
void intel_psr_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 chicken;
if (!HAS_PSR(dev_priv)) {
@ -520,11 +519,13 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (HAS_DDI(dev_priv)) {
if (dev_priv->psr.psr2_support) {
skl_psr_setup_su_vsc(intel_dp);
skl_psr_setup_su_vsc(intel_dp, crtc_state);
chicken = PSR2_VSC_ENABLE_PROG_HEADER;
if (dev_priv->psr.y_cord_support)
chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
I915_WRITE(EDP_PSR_DEBUG_CTL,
EDP_PSR_DEBUG_MASK_MEMUP |
EDP_PSR_DEBUG_MASK_HPD |
@ -533,7 +534,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
} else {
/* set up vsc header for psr1 */
hsw_psr_setup_vsc(intel_dp);
hsw_psr_setup_vsc(intel_dp, crtc_state);
/*
* Per Spec: Avoid continuous PSR exit by masking MEMUP
* and HPD. Also mask LPSP to avoid dependency on other
@ -553,7 +555,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 9)
intel_psr_activate(intel_dp);
} else {
vlv_psr_setup_vsc(intel_dp);
vlv_psr_setup_vsc(intel_dp, crtc_state);
/* Enable PSR on the panel */
vlv_psr_enable_sink(intel_dp);
@ -564,7 +566,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
* but leave it in the inactive state. So we might do this prior
* to active transition, i.e. here.
*/
vlv_psr_enable_source(intel_dp);
vlv_psr_enable_source(intel_dp, crtc_state);
}
/*
@ -585,37 +587,38 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
static void vlv_psr_disable(struct intel_dp *intel_dp)
static void vlv_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
uint32_t val;
if (dev_priv->psr.active) {
/* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
if (intel_wait_for_register(dev_priv,
VLV_PSRSTAT(intel_crtc->pipe),
VLV_PSRSTAT(crtc->pipe),
VLV_EDP_PSR_IN_TRANS,
0,
1))
WARN(1, "PSR transition took longer than expected\n");
val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
val = I915_READ(VLV_PSRCTL(crtc->pipe));
val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
val &= ~VLV_EDP_PSR_ENABLE;
val &= ~VLV_EDP_PSR_MODE_MASK;
I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
I915_WRITE(VLV_PSRCTL(crtc->pipe), val);
dev_priv->psr.active = false;
} else {
WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
WARN_ON(vlv_is_psr_active_on_pipe(dev, crtc->pipe));
}
}
static void hsw_psr_disable(struct intel_dp *intel_dp)
static void hsw_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@ -664,10 +667,12 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
* @old_crtc_state: old CRTC state
*
* This function needs to be called before disabling the pipe.
*/
void intel_psr_disable(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
@ -681,9 +686,9 @@ void intel_psr_disable(struct intel_dp *intel_dp)
/* Disable PSR on Source */
if (HAS_DDI(dev_priv))
hsw_psr_disable(intel_dp);
hsw_psr_disable(intel_dp, old_crtc_state);
else
vlv_psr_disable(intel_dp);
vlv_psr_disable(intel_dp, old_crtc_state);
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
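The kerneldoc above now spells out the ordering: intel_psr_enable() only after the pipe is fully trained and enabled, intel_psr_disable() before the pipe goes down, both against the explicit CRTC state. A hedged sketch of where those calls typically sit in an encoder's hooks; the hook names and the elided steps are illustrative assumptions, not part of this change.

static void example_edp_enable(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* ... link training, pipe/port enable, backlight on ... */

	/* Only now is the pipe fully trained and enabled, so PSR can be
	 * armed against the state it was computed for. */
	intel_psr_enable(intel_dp, crtc_state);
}

static void example_edp_disable(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* PSR has to be torn down before the pipe is disabled, using the
	 * state that is about to go away. */
	intel_psr_disable(intel_dp, old_crtc_state);

	/* ... backlight off, port/pipe disable ... */
}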

View File

@ -735,16 +735,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
void intel_engines_mark_idle(struct drm_i915_private *i915);
void intel_engines_reset_default_submission(struct drm_i915_private *i915);
static inline bool
__intel_engine_can_store_dword(unsigned int gen, unsigned int class)
{
if (gen <= 2)
return false; /* uses physical not virtual addresses */
if (gen == 6 && class == VIDEO_DECODE_CLASS)
return false; /* b0rked */
return true;
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
#endif /* _INTEL_RINGBUFFER_H_ */

View File

@ -2707,30 +2707,67 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
#define CNL_PROCMON_IDX(val) \
(((val) & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) >> VOLTAGE_INFO_SHIFT)
#define NUM_CNL_PROCMON \
(CNL_PROCMON_IDX(VOLTAGE_INFO_MASK | PROCESS_INFO_MASK) + 1)
enum {
PROCMON_0_85V_DOT_0,
PROCMON_0_95V_DOT_0,
PROCMON_0_95V_DOT_1,
PROCMON_1_05V_DOT_0,
PROCMON_1_05V_DOT_1,
};
static const struct cnl_procmon {
u32 dw1, dw9, dw10;
} cnl_procmon_values[NUM_CNL_PROCMON] = {
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0)] =
{ .dw1 = 0x00 << 16, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
[CNL_PROCMON_IDX(VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1)] =
{ .dw1 = 0x44 << 16, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
} cnl_procmon_values[] = {
[PROCMON_0_85V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
[PROCMON_0_95V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
[PROCMON_0_95V_DOT_1] =
{ .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
[PROCMON_1_05V_DOT_0] =
{ .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
[PROCMON_1_05V_DOT_1] =
{ .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
};
static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv)
{
const struct cnl_procmon *procmon;
u32 val;
val = I915_READ(CNL_PORT_COMP_DW3);
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
break;
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
break;
case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
break;
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
break;
case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
break;
}
val = I915_READ(CNL_PORT_COMP_DW1);
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
I915_WRITE(CNL_PORT_COMP_DW1, val);
I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
}
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
const struct cnl_procmon *procmon;
struct i915_power_well *well;
u32 val;
@ -2746,18 +2783,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
val &= ~CNL_COMP_PWR_DOWN;
I915_WRITE(CHICKEN_MISC_2, val);
val = I915_READ(CNL_PORT_COMP_DW3);
procmon = &cnl_procmon_values[CNL_PROCMON_IDX(val)];
WARN_ON(procmon->dw10 == 0);
val = I915_READ(CNL_PORT_COMP_DW1);
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
I915_WRITE(CNL_PORT_COMP_DW1, val);
I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9);
I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10);
cnl_set_procmon_ref_values(dev_priv);
val = I915_READ(CNL_PORT_COMP_DW0);
val |= COMP_INIT;
@ -2784,9 +2810,6 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
gen9_dbuf_enable(dev_priv);
}
#undef CNL_PROCMON_IDX
#undef NUM_CNL_PROCMON
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;

View File

@ -201,11 +201,8 @@ to_intel_sdvo_connector(struct drm_connector *connector)
return container_of(connector, struct intel_sdvo_connector, base.base);
}
static struct intel_sdvo_connector_state *
to_intel_sdvo_connector_state(struct drm_connector_state *conn_state)
{
return container_of(conn_state, struct intel_sdvo_connector_state, base.base);
}
#define to_intel_sdvo_connector_state(conn_state) \
container_of((conn_state), struct intel_sdvo_connector_state, base.base)
static bool
intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags);
@ -998,7 +995,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
struct intel_crtc_state *pipe_config)
const struct intel_crtc_state *pipe_config)
{
uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
union hdmi_infoframe frame;
@ -1032,7 +1029,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
}
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
struct drm_connector_state *conn_state)
const struct drm_connector_state *conn_state)
{
struct intel_sdvo_tv_format format;
uint32_t format_map;
@ -1202,9 +1199,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
} while (0)
static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector_state *sdvo_state)
const struct intel_sdvo_connector_state *sdvo_state)
{
struct drm_connector_state *conn_state = &sdvo_state->base.base;
const struct drm_connector_state *conn_state = &sdvo_state->base.base;
struct intel_sdvo_connector *intel_sdvo_conn =
to_intel_sdvo_connector(conn_state->connector);
uint16_t val;
@ -1258,14 +1255,15 @@ static void intel_sdvo_update_props(struct intel_sdvo *intel_sdvo,
}
static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
struct intel_sdvo_connector_state *sdvo_state = to_intel_sdvo_connector_state(conn_state);
struct drm_display_mode *mode = &crtc_state->base.mode;
const struct intel_sdvo_connector_state *sdvo_state =
to_intel_sdvo_connector_state(conn_state);
const struct drm_display_mode *mode = &crtc_state->base.mode;
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
@ -1507,8 +1505,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
@ -1552,21 +1550,21 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
}
static void pch_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
}
static void pch_post_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
intel_disable_sdvo(encoder, old_crtc_state, old_conn_state);
}
static void intel_enable_sdvo(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);

View File

@ -70,8 +70,7 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
* @start_vbl_count: vblank counter return pointer used for error checking
* @new_crtc_state: the new crtc state
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank happens within
@ -79,18 +78,18 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
* avoid random delays. The value written to @start_vbl_count should be
* supplied to intel_pipe_update_end() for error checking.
* avoid random delays.
*/
void intel_pipe_update_start(struct intel_crtc *crtc)
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI);
intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
DEFINE_WAIT(wait);
vblank_start = adjusted_mode->crtc_vblank_start;
@ -170,15 +169,15 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
/**
* intel_pipe_update_end() - end update of a set of display registers
* @crtc: the crtc of which the registers were updated
* @start_vbl_count: start vblank counter (used for error checking)
* @new_crtc_state: the new crtc state
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
* before a vblank using the value of @start_vbl_count.
* before a vblank.
*/
void intel_pipe_update_end(struct intel_crtc *crtc)
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@ -191,14 +190,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (crtc->base.state->event) {
if (new_crtc_state->base.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock(&crtc->base.dev->event_lock);
crtc->base.state->event = NULL;
new_crtc_state->base.event = NULL;
}
local_irq_enable();
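The reworked helpers above take the explicit new CRTC state instead of peeking at crtc->config and crtc->base.state. A hedged sketch of how they bracket a plane update; only the two helpers come from the hunks above, the surrounding function is illustrative.

static void example_update_crtc_planes(struct intel_crtc_state *new_crtc_state)
{
	/* Wait until we are far enough from the next vblank start, then
	 * run with interrupts off so the writes below cannot straddle it. */
	intel_pipe_update_start(new_crtc_state);

	/* ... program the double-buffered plane/pipe registers here ... */

	/* Re-enable interrupts, check that we did not overrun the vblank
	 * and arm the pending page-flip event from the new state. */
	intel_pipe_update_end(new_crtc_state);
}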

View File

@ -814,8 +814,8 @@ intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
static void
intel_enable_tv(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -829,8 +829,8 @@ intel_enable_tv(struct intel_encoder *encoder,
static void
intel_disable_tv(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@ -838,7 +838,7 @@ intel_disable_tv(struct intel_encoder *encoder,
I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *intel_tv_mode_find(struct drm_connector_state *conn_state)
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
{
int format = conn_state->tv.mode;
@ -976,8 +976,8 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);

View File

@ -1251,7 +1251,7 @@ static const struct register_whitelist {
} whitelist[] = {
{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
.offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
.size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
.size = 8, .gen_bitmask = GEN_RANGE(4, 10) },
};
int i915_reg_read_ioctl(struct drm_device *dev,

View File

@ -149,16 +149,19 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
u8 rsvd7:1;
u8 underscan_vga_timings:1;
u8 display_clock_mode:1;
u8 rsvd8:1; /* finish byte */
u8 vbios_hotplug_support:1;
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
u8 rsvd9:1;
u8 rotate_180:1; /* 181 */
u8 fdi_rx_polarity_inverted:1;
u8 rsvd10:4; /* finish byte */
u8 vbios_extended_mode:1; /* 160 */
u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
u8 panel_best_fit_timing:1; /* 160 */
u8 ignore_strap_state:1; /* 160 */
/* bits 4 */
u8 legacy_monitor_detect;
@ -167,9 +170,10 @@ struct bdb_general_features {
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 int_efp_support:1;
u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
u8 rsvd11:3; /* finish byte */
u8 dp_ssc_dongle_supported:1;
u8 rsvd11:2; /* finish byte */
} __packed;
/* pre-915 */
@ -206,6 +210,56 @@ struct bdb_general_features {
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
/* Add the device class for LFP, TV, HDMI */
#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV 0x1009
#define DEVICE_TYPE_HDMI 0x60D2
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
#define DEVICE_TYPE_eDP 0x78C6
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
/*
* Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
* system, the other bits may or may not be set for eDP outputs.
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
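A hedged illustration of how the mask above is meant to be applied when classifying a child device; the helper name is an assumption, not part of this change.

static bool example_device_type_is_edp(u16 device_type)
{
	/* Compare only the bits that are meaningful for eDP; the other
	 * device_type bits vary from system to system. */
	return (device_type & DEVICE_TYPE_eDP_BITS) ==
	       (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS);
}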
#define DEVICE_CFG_NONE 0x00
#define DEVICE_CFG_12BIT_DVOB 0x01
#define DEVICE_CFG_12BIT_DVOC 0x02
@ -226,77 +280,126 @@ struct bdb_general_features {
#define DEVICE_WIRE_DVOB_MASTER 0x0d
#define DEVICE_WIRE_DVOC_MASTER 0x0e
/* dvo_port pre BDB 155 */
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
/* dvo_port BDB 155+ */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
#define DVO_PORT_HDMIC 2
#define DVO_PORT_HDMID 3
#define DVO_PORT_LVDS 4
#define DVO_PORT_TV 5
#define DVO_PORT_CRT 6
#define DVO_PORT_DPB 7
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
#define DVO_PORT_DPE 11 /* 193 */
#define DVO_PORT_HDMIE 12 /* 193 */
#define DVO_PORT_MIPIA 21 /* 171 */
#define DVO_PORT_MIPIB 22 /* 171 */
#define DVO_PORT_MIPIC 23 /* 171 */
#define DVO_PORT_MIPID 24 /* 171 */
#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
/*
* We used to keep this struct but without any version control. We should avoid
* using it in the future, but it should be safe to keep using it in the old
* code. Do not change; we rely on its size.
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
*
* The child device config size has been increased, and fields have been added
* and their meaning has changed over time. Care must be taken when accessing
* basically any of the fields to ensure the correct interpretation for the BDB
* version in question.
*
* When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
* space for the full structure below, and initialize the tail not actually
* present in VBT to zeros. Accessing those fields is fine, as long as the
* default zero is taken into account, again according to the BDB version.
*
* BDB versions 155 and below are considered legacy, and version 155 seems to be
* a baseline for some of the VBT documentation. When adding new fields, please
* include the BDB version when the field was added, if it's above that.
*/
struct old_child_dev_config {
struct child_device_config {
u16 handle;
u16 device_type;
u8 device_id[10]; /* ascii string */
u16 device_type; /* See DEVICE_TYPE_* above */
union {
u8 device_id[10]; /* ascii string */
struct {
u8 i2c_speed;
u8 dp_onboard_redriver; /* 158 */
u8 dp_ondock_redriver; /* 158 */
u8 hdmi_level_shifter_value:4; /* 169 */
u8 hdmi_max_data_rate:4; /* 204 */
u16 dtd_buf_ptr; /* 161 */
u8 edidless_efp:1; /* 161 */
u8 compression_enable:1; /* 198 */
u8 compression_method:1; /* 198 */
u8 ganged_edp:1; /* 202 */
u8 reserved0:4;
u8 compression_structure_index:4; /* 198 */
u8 reserved1:4;
u8 slave_port; /* 202 */
u8 reserved2;
} __packed;
} __packed;
u16 addin_offset;
u8 dvo_port; /* See Device_PORT_* above */
u8 i2c_pin;
u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 dvo2_port;
u8 i2c2_pin;
u8 slave2_addr;
u8 ddc2_pin;
u8 capabilities;
u8 dvo_wiring;/* See DEVICE_WIRE_* above */
u8 dvo2_wiring;
u16 extended_type;
u8 dvo_function;
} __packed;
/* This one contains field offsets that are known to be common for all BDB
* versions. Notice that the meaning of the contents may still change,
* but at least the offsets are consistent. */
struct common_child_dev_config {
u16 handle;
u16 device_type;
u8 not_common1[12];
u8 dvo_port;
u8 not_common2[2];
u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
u8 i2c_pin;
u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
u8 efp_routed:1;
u8 lane_reversal:1;
u8 lspcon:1;
u8 iboost:1;
u8 hpd_invert:1;
u8 flag_reserved:3;
u8 hdmi_support:1;
u8 dp_support:1;
u8 tmds_support:1;
u8 support_reserved:5;
u8 aux_channel;
u8 not_common3[11];
u8 iboost_level;
} __packed;
union {
struct {
u8 dvo2_port;
u8 i2c2_pin;
u8 slave2_addr;
u8 ddc2_pin;
} __packed;
struct {
u8 efp_routed:1; /* 158 */
u8 lane_reversal:1; /* 184 */
u8 lspcon:1; /* 192 */
u8 iboost:1; /* 196 */
u8 hpd_invert:1; /* 196 */
u8 flag_reserved:3;
u8 hdmi_support:1; /* 158 */
u8 dp_support:1; /* 158 */
u8 tmds_support:1; /* 158 */
u8 support_reserved:5;
u8 aux_channel;
u8 dongle_detect;
} __packed;
} __packed;
/* This field changes depending on the BDB version, so the most reliable way to
* read it is by checking the BDB version and reading the raw pointer. */
union child_device_config {
/* This one is safe to be used anywhere, but the code should still check
* the BDB version. */
u8 raw[33];
/* This one should only be kept for legacy code. */
struct old_child_dev_config old;
/* This one should also be safe to use anywhere, even without version
* checks. */
struct common_child_dev_config common;
u8 pipe_cap:2;
u8 sdvo_stall:1; /* 158 */
u8 hpd_status:2;
u8 integrated_encoder:1;
u8 capabilities_reserved:2;
u8 dvo_wiring; /* See DEVICE_WIRE_* above */
union {
u8 dvo2_wiring;
u8 mipi_bridge_type; /* 171 */
} __packed;
u16 extended_type;
u8 dvo_function;
u8 dp_usb_type_c:1; /* 195 */
u8 flags2_reserved:7; /* 195 */
u8 dp_gpio_index; /* 195 */
u16 dp_gpio_pin_num; /* 195 */
u8 dp_iboost_level:4; /* 196 */
u8 hdmi_iboost_level:4; /* 196 */
} __packed;
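A hedged sketch of the version-gated access the comment above asks for; only the field and its /* 204 */ annotation come from the structure itself, the helper is illustrative and the caller is assumed to pass in the parsed BDB version.

static u8 example_child_hdmi_max_data_rate(const struct child_device_config *child,
					   u16 bdb_version)
{
	/* Fields tagged with a BDB version are only meaningful when the
	 * VBT is at least that new; for older VBTs the zero-initialised
	 * tail of the copied child config must be treated as "not set". */
	if (bdb_version < 204)
		return 0;

	return child->hdmi_max_data_rate;
}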
struct bdb_general_definitions {
@ -585,23 +688,38 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
struct edp_link_params {
struct edp_fast_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __packed;
struct edp_pwm_delays {
u16 pwm_on_to_backlight_enable;
u16 backlight_disable_to_pwm_off;
} __packed;
struct edp_full_link_params {
u8 preemphasis:4;
u8 vswing:4;
} __packed;
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
struct edp_link_params link_params[16];
struct edp_fast_link_params fast_link_params[16];
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
u16 edp_s3d_feature;
u16 edp_t3_optimization;
u64 edp_vswing_preemph; /* v173 */
u16 edp_s3d_feature; /* 162 */
u16 edp_t3_optimization; /* 165 */
u64 edp_vswing_preemph; /* 173 */
u16 fast_link_training; /* 182 */
u16 dpcd_600h_write_required; /* 185 */
struct edp_pwm_delays pwm_delays[16]; /* 186 */
u16 full_link_params_provided; /* 199 */
struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
struct psr_table {
@ -745,81 +863,6 @@ struct bdb_psr {
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
/* Add the device class for LFP, TV, HDMI */
#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV 0x1009
#define DEVICE_TYPE_HDMI 0x60D2
#define DEVICE_TYPE_DP 0x68C6
#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
#define DEVICE_TYPE_eDP 0x78C6
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
/*
* Bits we care about when checking for DEVICE_TYPE_eDP
* Depending on the system, the other bits may or may not
* be set for eDP outputs.
*/
#define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_LVDS_SINGALING | \
DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
#define DVO_D 3
/* Possible values for the "DVO Port" field for versions >= 155: */
#define DVO_PORT_HDMIA 0
#define DVO_PORT_HDMIB 1
#define DVO_PORT_HDMIC 2
#define DVO_PORT_HDMID 3
#define DVO_PORT_LVDS 4
#define DVO_PORT_TV 5
#define DVO_PORT_CRT 6
#define DVO_PORT_DPB 7
#define DVO_PORT_DPC 8
#define DVO_PORT_DPD 9
#define DVO_PORT_DPA 10
#define DVO_PORT_DPE 11
#define DVO_PORT_HDMIE 12
#define DVO_PORT_MIPIA 21
#define DVO_PORT_MIPIB 22
#define DVO_PORT_MIPIC 23
#define DVO_PORT_MIPID 24
/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data block
* block below

View File

@ -118,92 +118,125 @@
#define INTEL_IRONLAKE_M_IDS(info) \
INTEL_VGA_DEVICE(0x0046, info)
#define INTEL_SNB_D_IDS(info) \
#define INTEL_SNB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0102, info), \
INTEL_VGA_DEVICE(0x0112, info), \
INTEL_VGA_DEVICE(0x0122, info), \
INTEL_VGA_DEVICE(0x010A, info)
#define INTEL_SNB_M_IDS(info) \
INTEL_VGA_DEVICE(0x0106, info), \
#define INTEL_SNB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0112, info), \
INTEL_VGA_DEVICE(0x0122, info)
#define INTEL_SNB_D_IDS(info) \
INTEL_SNB_D_GT1_IDS(info), \
INTEL_SNB_D_GT2_IDS(info)
#define INTEL_SNB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0106, info)
#define INTEL_SNB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0116, info), \
INTEL_VGA_DEVICE(0x0126, info)
#define INTEL_SNB_M_IDS(info) \
INTEL_SNB_M_GT1_IDS(info), \
INTEL_SNB_M_GT2_IDS(info)
#define INTEL_IVB_M_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0156, info) /* GT1 mobile */
#define INTEL_IVB_M_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
#define INTEL_IVB_M_IDS(info) \
INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */
INTEL_IVB_M_GT1_IDS(info), \
INTEL_IVB_M_GT2_IDS(info)
#define INTEL_IVB_D_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x015a, info) /* GT1 server */
#define INTEL_IVB_D_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
#define INTEL_IVB_D_IDS(info) \
INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */
INTEL_IVB_D_GT1_IDS(info), \
INTEL_IVB_D_GT2_IDS(info)
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
#define INTEL_HSW_IDS(info) \
#define INTEL_HSW_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
#define INTEL_HSW_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
#define INTEL_HSW_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
#define INTEL_HSW_IDS(info) \
INTEL_HSW_GT1_IDS(info), \
INTEL_HSW_GT2_IDS(info), \
INTEL_HSW_GT3_IDS(info)
#define INTEL_VLV_IDS(info) \
INTEL_VGA_DEVICE(0x0f30, info), \
INTEL_VGA_DEVICE(0x0f31, info), \
@ -212,17 +245,19 @@
INTEL_VGA_DEVICE(0x0157, info), \
INTEL_VGA_DEVICE(0x0155, info)
#define INTEL_BDW_GT12_IDS(info) \
#define INTEL_BDW_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
#define INTEL_BDW_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info), /* GT1 Workstation */ \
INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
@ -243,7 +278,8 @@
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
#define INTEL_BDW_IDS(info) \
INTEL_BDW_GT12_IDS(info), \
INTEL_BDW_GT1_IDS(info), \
INTEL_BDW_GT2_IDS(info), \
INTEL_BDW_GT3_IDS(info), \
INTEL_BDW_RSVD_IDS(info)
@ -335,20 +371,22 @@
INTEL_KBL_GT4_IDS(info)
/* CFL S */
#define INTEL_CFL_S_IDS(info) \
#define INTEL_CFL_S_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
INTEL_VGA_DEVICE(0x3E93, info) /* SRV GT1 */
#define INTEL_CFL_S_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info) /* SRV GT2 */
/* CFL H */
#define INTEL_CFL_H_IDS(info) \
#define INTEL_CFL_H_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
/* CFL U */
#define INTEL_CFL_U_IDS(info) \
#define INTEL_CFL_U_GT3_IDS(info) \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \

View File

@ -1509,9 +1509,9 @@ struct drm_i915_perf_oa_config {
__u32 n_boolean_regs;
__u32 n_flex_regs;
__u64 __user mux_regs_ptr;
__u64 __user boolean_regs_ptr;
__u64 __user flex_regs_ptr;
__u64 mux_regs_ptr;
__u64 boolean_regs_ptr;
__u64 flex_regs_ptr;
};
#if defined(__cplusplus)