diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bff2fa941f60..e79577cb4665 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_TV, "TV" }, { DRM_MODE_CONNECTOR_eDP, "eDP" }, { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" }, + { DRM_MODE_CONNECTOR_DSI, "DSI" }, }; static const struct drm_prop_enum_list drm_encoder_enum_list[] = @@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] = { DRM_MODE_ENCODER_LVDS, "LVDS" }, { DRM_MODE_ENCODER_TVDAC, "TV" }, { DRM_MODE_ENCODER_VIRTUAL, "Virtual" }, + { DRM_MODE_ENCODER_DSI, "DSI" }, }; void drm_connector_ida_init(void) diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index b8449a84a0dc..65e60d26891b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \ intel_display.o \ intel_crt.o \ intel_lvds.o \ + intel_dsi.o \ + intel_dsi_cmd.o \ + intel_dsi_pll.o \ intel_bios.o \ intel_ddi.o \ intel_dp.o \ diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h index 33a62ad80100..312163379db9 100644 --- a/drivers/gpu/drm/i915/dvo.h +++ b/drivers/gpu/drm/i915/dvo.h @@ -76,17 +76,6 @@ struct intel_dvo_dev_ops { int (*mode_valid)(struct intel_dvo_device *dvo, struct drm_display_mode *mode); - /* - * Callback to adjust the mode to be set in the CRTC. - * - * This allows an output to adjust the clock or even the entire set of - * timings, which is used for panels with fixed timings or for - * buses with clock limitations. - */ - bool (*mode_fixup)(struct intel_dvo_device *dvo, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - /* * Callback for preparing mode changes on an output */ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a6f4cb5af185..80bed69fe5b7 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -145,6 +145,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (%s)", obj->ring->name); } +static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx) +{ + seq_putc(m, ctx->is_initialized ? 'I' : 'i'); + seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); + seq_putc(m, ' '); +} + static int i915_gem_object_list_info(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -1442,6 +1449,7 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; + struct i915_hw_context *ctx; int ret, i; ret = mutex_lock_interruptible(&dev->mode_config.mutex); @@ -1460,12 +1468,15 @@ static int i915_context_status(struct seq_file *m, void *unused) seq_putc(m, '\n'); } - for_each_ring(ring, dev_priv, i) { - if (ring->default_context) { - seq_printf(m, "HW default context %s ring ", ring->name); - describe_obj(m, ring->default_context->obj); - seq_putc(m, '\n'); - } + list_for_each_entry(ctx, &dev_priv->context_list, link) { + seq_puts(m, "HW context "); + describe_ctx(m, ctx); + for_each_ring(ring, dev_priv, i) + if (ring->default_context == ctx) + seq_printf(m, "(default context %s) ", ring->name); + + describe_obj(m, ctx->obj); + seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.mutex); @@ -1610,27 +1621,27 @@ static int i915_dpio_info(struct seq_file *m, void *data) seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL)); seq_printf(m, "DPIO_DIV_A: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_DIV_A)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A)); seq_printf(m, "DPIO_DIV_B: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_DIV_B)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B)); seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_REFSFR_A)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A)); seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_REFSFR_B)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B)); seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A)); seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B)); seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A)); seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n", - vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B)); + vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B)); seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", - vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); + vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE)); mutex_unlock(&dev_priv->dpio_lock); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c27a21034a5e..512645fdaff5 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -52,7 +52,7 @@ intel_ring_emit(LP_RING(dev_priv), x) #define ADVANCE_LP_RING() \ - intel_ring_advance(LP_RING(dev_priv)) + __intel_ring_advance(LP_RING(dev_priv)) /** * Lock test for when it's just for synchronization of ring access. @@ -1324,6 +1324,8 @@ static int i915_load_modeset_init(struct drm_device *dev) INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); + intel_init_power_well(dev); + intel_modeset_gem_init(dev); /* Always safe in the mode setting case. 
*/ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 69d8ed5416c3..6f385e1e9ed6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -576,11 +576,20 @@ static void intel_resume_hotplug(struct drm_device *dev) drm_helper_hpd_irq_event(dev); } -static int __i915_drm_thaw(struct drm_device *dev) +static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings) { struct drm_i915_private *dev_priv = dev->dev_private; int error = 0; + intel_uncore_sanitize(dev); + + if (drm_core_check_feature(dev, DRIVER_MODESET) && + restore_gtt_mappings) { + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); + } + i915_restore_state(dev); intel_opregion_setup(dev); @@ -596,6 +605,8 @@ static int __i915_drm_thaw(struct drm_device *dev) /* We need working interrupts for modeset enabling ... */ drm_irq_install(dev); + intel_init_power_well(dev); + intel_modeset_init_hw(dev); drm_modeset_lock_all(dev); @@ -640,19 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev) static int i915_drm_thaw(struct drm_device *dev) { - int error = 0; - - intel_uncore_sanitize(dev); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - __i915_drm_thaw(dev); - - return error; + return __i915_drm_thaw(dev, true); } int i915_resume(struct drm_device *dev) @@ -668,20 +667,12 @@ int i915_resume(struct drm_device *dev) pci_set_master(dev->pdev); - intel_uncore_sanitize(dev); - /* * Platforms with opregion should have sane BIOS, older ones (gen3 and - * earlier) need this since the BIOS might clear all our scratch PTEs. + * earlier) need to restore the GTT mappings since the BIOS might clear + * all our scratch PTEs. */ - if (drm_core_check_feature(dev, DRIVER_MODESET) && - !dev_priv->opregion.header) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } - - ret = __i915_drm_thaw(dev); + ret = __i915_drm_thaw(dev, !dev_priv->opregion.header); if (ret) return ret; @@ -719,24 +710,19 @@ int i915_reset(struct drm_device *dev) simulated = dev_priv->gpu_error.stop_rings != 0; - if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) { - DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); - ret = -ENODEV; - } else { - ret = intel_gpu_reset(dev); + ret = intel_gpu_reset(dev); - /* Also reset the gpu hangman. */ - if (simulated) { - DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); - dev_priv->gpu_error.stop_rings = 0; - if (ret == -ENODEV) { - DRM_ERROR("Reset not implemented, but ignoring " - "error for simulated gpu hangs\n"); - ret = 0; - } - } else - dev_priv->gpu_error.last_reset = get_seconds(); + /* Also reset the gpu hangman. 
*/ + if (simulated) { + DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); + dev_priv->gpu_error.stop_rings = 0; + if (ret == -ENODEV) { + DRM_ERROR("Reset not implemented, but ignoring " + "error for simulated gpu hangs\n"); + ret = 0; + } } + if (ret) { DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); @@ -799,6 +785,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *intel_info = (struct intel_device_info *) ent->driver_data; + if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) { + DRM_INFO("This hardware requires preliminary hardware support.\n" + "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); + return -ENODEV; + } + /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 35874b3a86dc..8c52cbdb76f3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -99,6 +99,7 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, + POWER_DOMAIN_VGA, }; #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) @@ -225,6 +226,8 @@ struct intel_opregion { struct opregion_header __iomem *header; struct opregion_acpi __iomem *acpi; struct opregion_swsci __iomem *swsci; + u32 swsci_gbda_sub_functions; + u32 swsci_sbcb_sub_functions; struct opregion_asle __iomem *asle; void __iomem *vbt; u32 __iomem *lid_state; @@ -326,6 +329,8 @@ struct drm_i915_error_state { u32 *active_bo_count, *pinned_bo_count; struct intel_overlay_error_state *overlay; struct intel_display_error_state *display; + int hangcheck_score[I915_NUM_RINGS]; + enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS]; }; struct intel_crtc_config; @@ -357,7 +362,7 @@ struct drm_i915_display_funcs { int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); - void (*update_wm)(struct drm_device *dev); + void (*update_wm)(struct drm_crtc *crtc); void (*update_sprite_wm)(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, @@ -367,7 +372,6 @@ struct drm_i915_display_funcs { * fills out the pipe-config with the hw state. */ bool (*get_pipe_config)(struct intel_crtc *, struct intel_crtc_config *); - void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *); int (*crtc_mode_set)(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb); @@ -420,6 +424,7 @@ struct intel_uncore { func(is_ivybridge) sep \ func(is_valleyview) sep \ func(is_haswell) sep \ + func(is_preliminary) sep \ func(has_force_wake) sep \ func(has_fbc) sep \ func(has_pipe_cxsr) sep \ @@ -568,6 +573,13 @@ struct i915_vma { /** This vma's place in the batchbuffer or on the eviction list */ struct list_head exec_list; + /** + * Used for performing relocations during execbuffer insertion. 
+ */ + struct hlist_node exec_node; + unsigned long exec_handle; + struct drm_i915_gem_exec_object2 *exec_entry; + }; struct i915_ctx_hang_stats { @@ -576,6 +588,12 @@ struct i915_ctx_hang_stats { /* This context had batch active when hang was declared */ unsigned batch_active; + + /* Time when this context was last blamed for a GPU reset */ + unsigned long guilty_ts; + + /* This context is banned to submit more work */ + bool banned; }; /* This must match up with the value previously used for execbuf2.rsvd1. */ @@ -584,10 +602,13 @@ struct i915_hw_context { struct kref ref; int id; bool is_initialized; + uint8_t remap_slice; struct drm_i915_file_private *file_priv; struct intel_ring_buffer *ring; struct drm_i915_gem_object *obj; struct i915_ctx_hang_stats hang_stats; + + struct list_head link; }; struct i915_fbc { @@ -900,9 +921,11 @@ struct i915_ums_state { int mm_suspended; }; +#define MAX_L3_SLICES 2 struct intel_l3_parity { - u32 *remap_info; + u32 *remap_info[MAX_L3_SLICES]; struct work_struct error_work; + int which_slice; }; struct i915_gem_mm { @@ -977,6 +1000,9 @@ struct i915_gpu_error { /* For hangcheck timer */ #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) + /* Hang gpu twice in this window and your context gets banned */ +#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) + struct timer_list hangcheck_timer; /* For reset and error_state handling. */ @@ -985,8 +1011,6 @@ struct i915_gpu_error { struct drm_i915_error_state *first_error; struct work_struct work; - unsigned long last_reset; - /** * State variable and reset counter controlling the reset flow * @@ -1058,6 +1082,11 @@ struct intel_vbt_data { int edp_bpp; struct edp_power_seq edp_pps; + /* MIPI DSI */ + struct { + u16 panel_id; + } dsi; + int crt_ddc_pin; int child_dev_num; @@ -1318,6 +1347,7 @@ typedef struct drm_i915_private { bool hw_contexts_disabled; uint32_t hw_context_size; + struct list_head context_list; u32 fdi_rx_config; @@ -1398,8 +1428,6 @@ struct drm_i915_gem_object { struct list_head ring_list; /** Used in execbuf to temporarily hold a ref */ struct list_head obj_exec_link; - /** This object's place in the batchbuffer or on the eviction list */ - struct list_head exec_list; /** * This is set if the object is on the active lists (has pending @@ -1485,13 +1513,6 @@ struct drm_i915_gem_object { void *dma_buf_vmapping; int vmapping_count; - /** - * Used for performing relocations during execbuffer insertion. - */ - struct hlist_node exec_node; - unsigned long exec_handle; - struct drm_i915_gem_exec_object2 *exec_entry; - struct intel_ring_buffer *ring; /** Breadcrumb of last rendering to the buffer. */ @@ -1600,6 +1621,9 @@ struct drm_i915_file_private { ((dev)->pci_device & 0xFF00) == 0x0C00) #define IS_ULT(dev) (IS_HASWELL(dev) && \ ((dev)->pci_device & 0xFF00) == 0x0A00) +#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ + ((dev)->pci_device & 0x00F0) == 0x0020) +#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) /* * The genX designation typically refers to the render engine, so render @@ -1668,7 +1692,9 @@ struct drm_i915_file_private { #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) -#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) +/* DPF == dynamic parity feature */ +#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) +#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) #define GT_FREQUENCY_MULTIPLIER 50 @@ -1828,8 +1854,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, size_t size); void i915_gem_free_object(struct drm_gem_object *obj); -struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm); void i915_gem_vma_destroy(struct i915_vma *vma); int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, @@ -1931,7 +1955,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); int __must_check i915_gem_init(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); -void i915_gem_l3_remap(struct drm_device *dev); +int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice); void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); @@ -2090,6 +2114,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, unsigned cache_level, bool mappable, bool nonblock); +int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_everything(struct drm_device *dev); /* i915_gem_stolen.c */ @@ -2182,15 +2207,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) extern void intel_i2c_reset(struct drm_device *dev); /* intel_opregion.c */ +struct intel_encoder; extern int intel_opregion_setup(struct drm_device *dev); #ifdef CONFIG_ACPI extern void intel_opregion_init(struct drm_device *dev); extern void intel_opregion_fini(struct drm_device *dev); extern void intel_opregion_asle_intr(struct drm_device *dev); +extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, + bool enable); +extern int intel_opregion_notify_adapter(struct drm_device *dev, + pci_power_t state); #else static inline void intel_opregion_init(struct drm_device *dev) { return; } static inline void intel_opregion_fini(struct drm_device *dev) { return; } static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } +static inline int +intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) +{ + return 0; +} +static inline int +intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) +{ + return 0; +} #endif /* intel_acpi.c */ @@ -2252,8 +2292,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val); u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr); -u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg); -void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val); +u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg); +void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg); +void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg); +void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg); +void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); +u32 vlv_dpio_read(struct drm_i915_private *dev_priv, 
enum pipe pipe, int reg); +void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val); u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, enum intel_sbi_destination destination); void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cdfb9da0e4ce..36c4ad9c752e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj, bool force); static __must_check int +i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, + bool readonly); +static __must_check int i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned alignment, @@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev, * optimizes for the case when the gpu will dirty the data * anyway again before the next pread happens. */ needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level); - if (i915_gem_obj_bound_any(obj)) { - ret = i915_gem_object_set_to_gtt_domain(obj, false); - if (ret) - return ret; - } + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; } ret = i915_gem_object_get_pages(obj); @@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * optimizes for the case when the gpu will use the data * right away and we therefore have to clflush anyway. */ needs_clflush_after = cpu_write_needs_clflush(obj); - if (i915_gem_obj_bound_any(obj)) { - ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) - return ret; - } + ret = i915_gem_object_wait_rendering(obj, false); + if (ret) + return ret; } /* Same trick applies to invalidate partially written cachelines read * before writing. */ @@ -966,7 +965,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex)); ret = 0; - if (seqno == ring->outstanding_lazy_request) + if (seqno == ring->outstanding_lazy_seqno) ret = i915_add_request(ring, NULL); return ret; @@ -2078,11 +2077,10 @@ int __i915_add_request(struct intel_ring_buffer *ring, if (ret) return ret; - request = kmalloc(sizeof(*request), GFP_KERNEL); - if (request == NULL) + request = ring->preallocated_lazy_request; + if (WARN_ON(request == NULL)) return -ENOMEM; - /* Record the position of the start of the request so that * should we detect the updated seqno part-way through the * GPU processing the request, we never over-estimate the @@ -2091,17 +2089,13 @@ int __i915_add_request(struct intel_ring_buffer *ring, request_ring_position = intel_ring_get_tail(ring); ret = ring->add_request(ring); - if (ret) { - kfree(request); + if (ret) return ret; - } request->seqno = intel_ring_get_seqno(ring); request->ring = ring; request->head = request_start; request->tail = request_ring_position; - request->ctx = ring->last_context; - request->batch_obj = obj; /* Whilst this request exists, batch_obj will be on the * active_list, and so will hold the active reference. Only when this @@ -2109,7 +2103,12 @@ int __i915_add_request(struct intel_ring_buffer *ring, * inactive_list and lose its active reference. Hence we do not need * to explicitly hold another reference here. */ + request->batch_obj = obj; + /* Hold a reference to the current context so that we can inspect + * it later in case a hangcheck error event fires. 
+ */ + request->ctx = ring->last_context; if (request->ctx) i915_gem_context_reference(request->ctx); @@ -2129,7 +2128,8 @@ int __i915_add_request(struct intel_ring_buffer *ring, } trace_i915_gem_request_add(ring, request->seqno); - ring->outstanding_lazy_request = 0; + ring->outstanding_lazy_seqno = 0; + ring->preallocated_lazy_request = NULL; if (!dev_priv->ums.mm_suspended) { i915_queue_hangcheck(ring->dev); @@ -2224,6 +2224,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request, return false; } +static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs) +{ + const unsigned long elapsed = get_seconds() - hs->guilty_ts; + + if (hs->banned) + return true; + + if (elapsed <= DRM_I915_CTX_BAN_PERIOD) { + DRM_ERROR("context hanging too fast, declaring banned!\n"); + return true; + } + + return false; +} + static void i915_set_reset_status(struct intel_ring_buffer *ring, struct drm_i915_gem_request *request, u32 acthd) @@ -2260,10 +2275,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring, hs = &request->file_priv->hang_stats; if (hs) { - if (guilty) + if (guilty) { + hs->banned = i915_context_is_banned(hs); hs->batch_active++; - else + hs->guilty_ts = get_seconds(); + } else { hs->batch_pending++; + } } } @@ -2641,11 +2659,17 @@ int i915_vma_unbind(struct i915_vma *vma) drm_i915_private_t *dev_priv = obj->base.dev->dev_private; int ret; + /* For now we only ever use 1 vma per object */ + WARN_ON(!list_is_singular(&obj->vma_list)); + if (list_empty(&vma->vma_link)) return 0; - if (!drm_mm_node_allocated(&vma->node)) - goto destroy; + if (!drm_mm_node_allocated(&vma->node)) { + i915_gem_vma_destroy(vma); + + return 0; + } if (obj->pin_count) return -EBUSY; @@ -2685,13 +2709,10 @@ int i915_vma_unbind(struct i915_vma *vma) drm_mm_remove_node(&vma->node); -destroy: i915_gem_vma_destroy(vma); /* Since the unbound list is global, only move to that list if - * no more VMAs exist. - * NB: Until we have real VMAs there will only ever be one */ - WARN_ON(!list_empty(&obj->vma_list)); + * no more VMAs exist. 
*/ if (list_empty(&obj->vma_list)) list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); @@ -4015,7 +4036,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, { INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); - INIT_LIST_HEAD(&obj->exec_list); INIT_LIST_HEAD(&obj->obj_exec_link); INIT_LIST_HEAD(&obj->vma_list); @@ -4147,8 +4167,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) i915_gem_object_free(obj); } -struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_address_space *vm) +{ + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == vm) + return vma; + + return NULL; +} + +static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) { struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (vma == NULL) @@ -4169,10 +4200,29 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, return vma; } +struct i915_vma * +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = __i915_gem_vma_create(obj, vm); + + return vma; +} + void i915_gem_vma_destroy(struct i915_vma *vma) { WARN_ON(vma->node.allocated); + + /* Keep the vma as a placeholder in the execbuffer reservation lists */ + if (!list_empty(&vma->exec_list)) + return; + list_del(&vma->vma_link); + kfree(vma); } @@ -4209,36 +4259,35 @@ i915_gem_idle(struct drm_device *dev) return 0; } -void i915_gem_l3_remap(struct drm_device *dev) +int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice) { + struct drm_device *dev = ring->dev; drm_i915_private_t *dev_priv = dev->dev_private; - u32 misccpctl; - int i; + u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200); + u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; + int i, ret; - if (!HAS_L3_GPU_CACHE(dev)) - return; + if (!HAS_L3_DPF(dev) || !remap_info) + return 0; - if (!dev_priv->l3_parity.remap_info) - return; - - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - POSTING_READ(GEN7_MISCCPCTL); + ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3); + if (ret) + return ret; + /* + * Note: We do not worry about the concurrent register cacheline hang + * here because no other code should access these registers other than + * at initialization time. 
+ */ for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { - u32 remap = I915_READ(GEN7_L3LOG_BASE + i); - if (remap && remap != dev_priv->l3_parity.remap_info[i/4]) - DRM_DEBUG("0x%x was already programmed to %x\n", - GEN7_L3LOG_BASE + i, remap); - if (remap && !dev_priv->l3_parity.remap_info[i/4]) - DRM_DEBUG_DRIVER("Clearing remapped register\n"); - I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]); + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, reg_base + i); + intel_ring_emit(ring, remap_info[i/4]); } - /* Make sure all the writes land before disabling dop clock gating */ - POSTING_READ(GEN7_L3LOG_BASE); + intel_ring_advance(ring); - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + return ret; } void i915_gem_init_swizzling(struct drm_device *dev) @@ -4330,7 +4379,7 @@ int i915_gem_init_hw(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - int ret; + int ret, i; if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; @@ -4338,20 +4387,26 @@ i915_gem_init_hw(struct drm_device *dev) if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); + if (IS_HSW_GT3(dev)) + I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED); + else + I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED); + if (HAS_PCH_NOP(dev)) { u32 temp = I915_READ(GEN7_MSG_CTL); temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK); I915_WRITE(GEN7_MSG_CTL, temp); } - i915_gem_l3_remap(dev); - i915_gem_init_swizzling(dev); ret = i915_gem_init_rings(dev); if (ret) return ret; + for (i = 0; i < NUM_L3_SLICES(dev); i++) + i915_gem_l3_remap(&dev_priv->ring[RCS], i); + /* * XXX: There was some w/a described somewhere suggesting loading * contexts before PPGTT. @@ -4523,6 +4578,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->vm_list); i915_init_vm(dev_priv, &dev_priv->gtt.base); + INIT_LIST_HEAD(&dev_priv->context_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); @@ -4859,11 +4915,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; - struct i915_address_space *vm; + struct i915_vma *vma; - list_for_each_entry(vm, &dev_priv->vm_list, global_link) - if (i915_gem_obj_bound(o, vm)) + list_for_each_entry(vma, &o->vma_list, vma_link) + if (drm_mm_node_allocated(&vma->node)) return true; return false; @@ -4920,27 +4975,3 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc) mutex_unlock(&dev->struct_mutex); return freed; } - -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->vm == vm) - return vma; - - return NULL; -} - -struct i915_vma * -i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - struct i915_vma *vma; - - vma = i915_gem_obj_to_vma(obj, vm); - if (!vma) - vma = i915_gem_vma_create(obj, vm); - - return vma; -} diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 403309c2a7d6..9af3fe7e42b0 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -73,7 +73,7 @@ * * There are two confusing terms used above: * The "current context" means the context which is currently running on the - * 
GPU. The GPU has loaded it's state already and has stored away the gtt + * GPU. The GPU has loaded its state already and has stored away the gtt * offset of the BO. The GPU is not actively referencing the data at this * offset, but it will on the next context switch. The only way to avoid this * is to do a GPU reset. @@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref) struct i915_hw_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); + list_del(&ctx->link); drm_gem_object_unreference(&ctx->obj->base); kfree(ctx); } @@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev, kref_init(&ctx->ref); ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size); + INIT_LIST_HEAD(&ctx->link); if (ctx->obj == NULL) { kfree(ctx); DRM_DEBUG_DRIVER("Context object allocated failed\n"); @@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev, * assertion in the context switch code. */ ctx->ring = &dev_priv->ring[RCS]; + list_add_tail(&ctx->link, &dev_priv->context_list); /* Default context will never have a file_priv */ if (file_priv == NULL) @@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev, ctx->file_priv = file_priv; ctx->id = ret; + /* NB: Mark all slices as needing a remap so that when the context first + * loads it will restore whatever remap state already exists. If there + * is no remap info, it will be a NOP. */ + ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; return ctx; @@ -393,11 +400,11 @@ static int do_switch(struct i915_hw_context *to) struct intel_ring_buffer *ring = to->ring; struct i915_hw_context *from = ring->last_context; u32 hw_flags = 0; - int ret; + int ret, i; BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0); - if (from == to) + if (from == to && !to->remap_slice) return 0; ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false); @@ -420,8 +427,6 @@ static int do_switch(struct i915_hw_context *to) if (!to->is_initialized || is_default_context(to)) hw_flags |= MI_RESTORE_INHIBIT; - else if (WARN_ON_ONCE(from == to)) /* not yet expected */ - hw_flags |= MI_FORCE_RESTORE; ret = mi_set_context(ring, to, hw_flags); if (ret) { @@ -429,6 +434,18 @@ static int do_switch(struct i915_hw_context *to) return ret; } + for (i = 0; i < MAX_L3_SLICES; i++) { + if (!(to->remap_slice & (1<<i))) + continue; + + ret = i915_gem_l3_remap(ring, i); + /* If it failed, try again next round */ + if (ret) + DRM_DEBUG_DRIVER("L3 remapping failed\n"); + else + to->remap_slice &= ~(1<<i); + } + if (from != NULL) { from->obj->dirty = 1; BUG_ON(from->obj->ring != ring); - ret = i915_add_request(ring, NULL); - if (ret) { - /* Too late, we've already scheduled a context switch. - * Try to undo the change so that the hw state is - * consistent with out tracking. In case of emergency, - * scream. - */ - WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT)); - return ret; - } - + /* obj is kept alive until the next request by its active ref */ i915_gem_object_unpin(from->obj); i915_gem_context_unreference(from); } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 91b700155850..3a3981eb3012 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) if (vma->obj->pin_count) return false; + if (WARN_ON(!list_empty(&vma->exec_list))) + return false; + list_add(&vma->exec_list, unwind); return drm_mm_scan_add_block(&vma->node); } @@ -113,7 +116,7 @@ none: } /* We expect the caller to unpin, evict all and try again, or give up. - * So calling i915_gem_evict_everything() is unnecessary. + * So calling i915_gem_evict_vm() is unnecessary. 
*/ return -ENOSPC; @@ -152,12 +155,46 @@ found: return ret; } +/** + * i915_gem_evict_vm - Try to free up VM space + * + * @vm: Address space to evict from + * @do_idle: Boolean directing whether to idle first. + * + * VM eviction is about freeing up virtual address space. If one wants fine + * grained eviction, they should see evict something for more details. In terms + * of freeing up actual system memory, this function may not accomplish the + * desired result. An object may be shared in multiple address space, and this + * function will not assert those objects be freed. + * + * Using do_idle will result in a more complete eviction because it retires, and + * inactivates current BOs. + */ +int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle) +{ + struct i915_vma *vma, *next; + int ret; + + if (do_idle) { + ret = i915_gpu_idle(vm->dev); + if (ret) + return ret; + + i915_gem_retire_requests(vm->dev); + } + + list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) + if (vma->obj->pin_count == 0) + WARN_ON(i915_vma_unbind(vma)); + + return 0; +} + int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; struct i915_address_space *vm; - struct i915_vma *vma, *next; bool lists_empty = true; int ret; @@ -184,11 +221,8 @@ i915_gem_evict_everything(struct drm_device *dev) i915_gem_retire_requests(dev); /* Having flushed everything, unbind() should never raise an error */ - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) - if (vma->obj->pin_count == 0) - WARN_ON(i915_vma_unbind(vma)); - } + list_for_each_entry(vm, &dev_priv->vm_list, global_link) + WARN_ON(i915_gem_evict_vm(vm, false)); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index bf345777ae9f..ee933572bdc1 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -33,24 +33,24 @@ #include "intel_drv.h" #include <linux/dma_remapping.h> -struct eb_objects { - struct list_head objects; +struct eb_vmas { + struct list_head vmas; int and; union { - struct drm_i915_gem_object *lut[0]; + struct i915_vma *lut[0]; struct hlist_head buckets[0]; }; }; -static struct eb_objects * -eb_create(struct drm_i915_gem_execbuffer2 *args) +static struct eb_vmas * +eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm) { - struct eb_objects *eb = NULL; + struct eb_vmas *eb = NULL; if (args->flags & I915_EXEC_HANDLE_LUT) { int size = args->buffer_count; - size *= sizeof(struct drm_i915_gem_object *); - size += sizeof(struct eb_objects); + size *= sizeof(struct i915_vma *); + size += sizeof(struct eb_vmas); eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); } @@ -61,7 +61,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) while (count > 2*size) count >>= 1; eb = kzalloc(count*sizeof(struct hlist_head) + - sizeof(struct eb_objects), + sizeof(struct eb_vmas), GFP_TEMPORARY); if (eb == NULL) return eb; @@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) } else eb->and = -args->buffer_count; - INIT_LIST_HEAD(&eb->objects); + INIT_LIST_HEAD(&eb->vmas); return eb; } static void -eb_reset(struct eb_objects *eb) +eb_reset(struct eb_vmas *eb) { if (eb->and >= 0) memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); } static int -eb_lookup_objects(struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec, - const struct drm_i915_gem_execbuffer2 *args, - 
struct drm_file *file) +eb_lookup_vmas(struct eb_vmas *eb, + struct drm_i915_gem_exec_object2 *exec, + const struct drm_i915_gem_execbuffer2 *args, + struct i915_address_space *vm, + struct drm_file *file) { - int i; + struct drm_i915_gem_object *obj; + struct list_head objects; + int i, ret = 0; + INIT_LIST_HEAD(&objects); spin_lock(&file->table_lock); + /* Grab a reference to the object and release the lock so we can lookup + * or create the VMA without using GFP_ATOMIC */ for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj; - obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); if (obj == NULL) { spin_unlock(&file->table_lock); DRM_DEBUG("Invalid object handle %d at index %d\n", exec[i].handle, i); - return -ENOENT; + ret = -ENOENT; + goto out; } - if (!list_empty(&obj->exec_list)) { + if (!list_empty(&obj->obj_exec_link)) { spin_unlock(&file->table_lock); DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", obj, exec[i].handle, i); - return -EINVAL; + ret = -EINVAL; + goto out; } drm_gem_object_reference(&obj->base); - list_add_tail(&obj->exec_list, &eb->objects); - - obj->exec_entry = &exec[i]; - if (eb->and < 0) { - eb->lut[i] = obj; - } else { - uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; - obj->exec_handle = handle; - hlist_add_head(&obj->exec_node, - &eb->buckets[handle & eb->and]); - } + list_add_tail(&obj->obj_exec_link, &objects); } spin_unlock(&file->table_lock); - return 0; + i = 0; + list_for_each_entry(obj, &objects, obj_exec_link) { + struct i915_vma *vma; + + /* + * NOTE: We can leak any vmas created here when something fails + * later on. But that's no issue since vma_unbind can deal with + * vmas which are not actually bound. And since only + * lookup_or_create exists as an interface to get at the vma + * from the (obj, vm) we don't run the risk of creating + * duplicated vmas for the same vm. + */ + vma = i915_gem_obj_lookup_or_create_vma(obj, vm); + if (IS_ERR(vma)) { + DRM_DEBUG("Failed to lookup VMA\n"); + ret = PTR_ERR(vma); + goto out; + } + + list_add_tail(&vma->exec_list, &eb->vmas); + + vma->exec_entry = &exec[i]; + if (eb->and < 0) { + eb->lut[i] = vma; + } else { + uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? 
i : exec[i].handle; + vma->exec_handle = handle; + hlist_add_head(&vma->exec_node, + &eb->buckets[handle & eb->and]); + } + ++i; + } + + +out: + while (!list_empty(&objects)) { + obj = list_first_entry(&objects, + struct drm_i915_gem_object, + obj_exec_link); + list_del_init(&obj->obj_exec_link); + if (ret) + drm_gem_object_unreference(&obj->base); + } + return ret; } -static struct drm_i915_gem_object * -eb_get_object(struct eb_objects *eb, unsigned long handle) +static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle) { if (eb->and < 0) { if (handle >= -eb->and) @@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle) head = &eb->buckets[handle & eb->and]; hlist_for_each(node, head) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; - obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); - if (obj->exec_handle == handle) - return obj; + vma = hlist_entry(node, struct i915_vma, exec_node); + if (vma->exec_handle == handle) + return vma; } return NULL; } } -static void -eb_destroy(struct eb_objects *eb) -{ - while (!list_empty(&eb->objects)) { - struct drm_i915_gem_object *obj; +static void eb_destroy(struct eb_vmas *eb) { + while (!list_empty(&eb->vmas)) { + struct i915_vma *vma; - obj = list_first_entry(&eb->objects, - struct drm_i915_gem_object, + vma = list_first_entry(&eb->vmas, + struct i915_vma, exec_list); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + list_del_init(&vma->exec_list); + drm_gem_object_unreference(&vma->obj->base); } kfree(eb); } static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { - return (obj->base.write_domain == I915_GEM_DOMAIN_CPU || + return (HAS_LLC(obj->base.dev) || + obj->base.write_domain == I915_GEM_DOMAIN_CPU || !obj->map_and_fenceable || obj->cache_level != I915_CACHE_NONE); } @@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj, char *vaddr; int ret = -EINVAL; - ret = i915_gem_object_set_to_cpu_domain(obj, 1); + ret = i915_gem_object_set_to_cpu_domain(obj, true); if (ret) return ret; @@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, - struct eb_objects *eb, + struct eb_vmas *eb, struct drm_i915_gem_relocation_entry *reloc, struct i915_address_space *vm) { struct drm_device *dev = obj->base.dev; struct drm_gem_object *target_obj; struct drm_i915_gem_object *target_i915_obj; + struct i915_vma *target_vma; uint32_t target_offset; int ret = -EINVAL; /* we've already hold a reference to all valid objects */ - target_obj = &eb_get_object(eb, reloc->target_handle)->base; - if (unlikely(target_obj == NULL)) + target_vma = eb_get_vma(eb, reloc->target_handle); + if (unlikely(target_vma == NULL)) return -ENOENT; + target_i915_obj = target_vma->obj; + target_obj = &target_vma->obj->base; - target_i915_obj = to_intel_bo(target_obj); target_offset = i915_gem_obj_ggtt_offset(target_i915_obj); /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and @@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, } static int -i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, - struct eb_objects *eb, - struct i915_address_space *vm) +i915_gem_execbuffer_relocate_vma(struct i915_vma *vma, + struct eb_vmas *eb) { #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry)) struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)]; struct drm_i915_gem_relocation_entry 
__user *user_relocs; - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int remain, ret; user_relocs = to_user_ptr(entry->relocs_ptr); @@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, do { u64 offset = r->presumed_offset; - ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, - vm); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, + vma->vm); if (ret) return ret; @@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, } static int -i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, - struct eb_objects *eb, - struct drm_i915_gem_relocation_entry *relocs, - struct i915_address_space *vm) +i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma, + struct eb_vmas *eb, + struct drm_i915_gem_relocation_entry *relocs) { - const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; int i, ret; for (i = 0; i < entry->relocation_count; i++) { - ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], - vm); + ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], + vma->vm); if (ret) return ret; } @@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, } static int -i915_gem_execbuffer_relocate(struct eb_objects *eb, +i915_gem_execbuffer_relocate(struct eb_vmas *eb, struct i915_address_space *vm) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; int ret = 0; /* This is the fast path and we cannot handle a pagefault whilst @@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb, * lockdep complains vehemently. */ pagefault_disable(); - list_for_each_entry(obj, &eb->objects, exec_list) { - ret = i915_gem_execbuffer_relocate_object(obj, eb, vm); + list_for_each_entry(vma, &eb->vmas, exec_list) { + ret = i915_gem_execbuffer_relocate_vma(vma, eb); if (ret) break; } @@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb, #define __EXEC_OBJECT_HAS_FENCE (1<<30) static int -need_reloc_mappable(struct drm_i915_gem_object *obj) +need_reloc_mappable(struct i915_vma *vma) { - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; - return entry->relocation_count && !use_cpu_reloc(obj); + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + return entry->relocation_count && !use_cpu_reloc(vma->obj) && + i915_is_ggtt(vma->vm); } static int -i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, - struct intel_ring_buffer *ring, - struct i915_address_space *vm, - bool *need_reloc) +i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, + struct intel_ring_buffer *ring, + bool *need_reloc) { - struct drm_i915_private *dev_priv = obj->base.dev->dev_private; - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence, need_mappable; + struct drm_i915_gem_object *obj = vma->obj; int ret; need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); + need_mappable = need_fence || need_reloc_mappable(vma); - ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable, + ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, 
need_mappable, false); if (ret) return ret; @@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->has_aliasing_ppgtt_mapping = 1; } - if (entry->offset != i915_gem_obj_offset(obj, vm)) { - entry->offset = i915_gem_obj_offset(obj, vm); + if (entry->offset != vma->node.start) { + entry->offset = vma->node.start; *need_reloc = true; } @@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, } static void -i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) +i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma) { struct drm_i915_gem_exec_object2 *entry; + struct drm_i915_gem_object *obj = vma->obj; - if (!i915_gem_obj_bound_any(obj)) + if (!drm_mm_node_allocated(&vma->node)) return; - entry = obj->exec_entry; + entry = vma->exec_entry; if (entry->flags & __EXEC_OBJECT_HAS_FENCE) i915_gem_object_unpin_fence(obj); @@ -505,41 +544,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, - struct list_head *objects, - struct i915_address_space *vm, + struct list_head *vmas, bool *need_relocs) { struct drm_i915_gem_object *obj; - struct list_head ordered_objects; + struct i915_vma *vma; + struct i915_address_space *vm; + struct list_head ordered_vmas; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; int retry; - INIT_LIST_HEAD(&ordered_objects); - while (!list_empty(objects)) { + if (list_empty(vmas)) + return 0; + + vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; + + INIT_LIST_HEAD(&ordered_vmas); + while (!list_empty(vmas)) { struct drm_i915_gem_exec_object2 *entry; bool need_fence, need_mappable; - obj = list_first_entry(objects, - struct drm_i915_gem_object, - exec_list); - entry = obj->exec_entry; + vma = list_first_entry(vmas, struct i915_vma, exec_list); + obj = vma->obj; + entry = vma->exec_entry; need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); + need_mappable = need_fence || need_reloc_mappable(vma); if (need_mappable) - list_move(&obj->exec_list, &ordered_objects); + list_move(&vma->exec_list, &ordered_vmas); else - list_move_tail(&obj->exec_list, &ordered_objects); + list_move_tail(&vma->exec_list, &ordered_vmas); obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; obj->base.pending_write_domain = 0; obj->pending_fenced_gpu_access = false; } - list_splice(&ordered_objects, objects); + list_splice(&ordered_vmas, vmas); /* Attempt to pin all of the buffers into the GTT. * This is done in 3 phases: @@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, int ret = 0; /* Unbind any ill-fitting objects or pin. 
*/ - list_for_each_entry(obj, objects, exec_list) { - struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool need_fence, need_mappable; - u32 obj_offset; - if (!i915_gem_obj_bound(obj, vm)) + obj = vma->obj; + + if (!drm_mm_node_allocated(&vma->node)) continue; - obj_offset = i915_gem_obj_offset(obj, vm); need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(obj); + need_mappable = need_fence || need_reloc_mappable(vma); WARN_ON((need_mappable || need_fence) && - !i915_is_ggtt(vm)); + !i915_is_ggtt(vma->vm)); if ((entry->alignment && - obj_offset & (entry->alignment - 1)) || + vma->node.start & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) - ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)); + ret = i915_vma_unbind(vma); else - ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); if (ret) goto err; } /* Bind fresh objects */ - list_for_each_entry(obj, objects, exec_list) { - if (i915_gem_obj_bound(obj, vm)) + list_for_each_entry(vma, vmas, exec_list) { + if (drm_mm_node_allocated(&vma->node)) continue; - ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); if (ret) goto err; } err: /* Decrement pin count for bound objects */ - list_for_each_entry(obj, objects, exec_list) - i915_gem_execbuffer_unreserve_object(obj); + list_for_each_entry(vma, vmas, exec_list) + i915_gem_execbuffer_unreserve_vma(vma); if (ret != -ENOSPC || retry++) return ret; - ret = i915_gem_evict_everything(ring->dev); + ret = i915_gem_evict_vm(vm, true); if (ret) return ret; } while (1); @@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, struct intel_ring_buffer *ring, - struct eb_objects *eb, - struct drm_i915_gem_exec_object2 *exec, - struct i915_address_space *vm) + struct eb_vmas *eb, + struct drm_i915_gem_exec_object2 *exec) { struct drm_i915_gem_relocation_entry *reloc; - struct drm_i915_gem_object *obj; + struct i915_address_space *vm; + struct i915_vma *vma; bool need_relocs; int *reloc_offset; int i, total, ret; int count = args->buffer_count; + if (WARN_ON(list_empty(&eb->vmas))) + return 0; + + vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm; + /* We may process another execbuffer during the unlock... 
*/ - while (!list_empty(&eb->objects)) { - obj = list_first_entry(&eb->objects, - struct drm_i915_gem_object, - exec_list); - list_del_init(&obj->exec_list); - drm_gem_object_unreference(&obj->base); + while (!list_empty(&eb->vmas)) { + vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list); + list_del_init(&vma->exec_list); + drm_gem_object_unreference(&vma->obj->base); } mutex_unlock(&dev->struct_mutex); @@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, /* reacquire the objects */ eb_reset(eb); - ret = eb_lookup_objects(eb, exec, args, file); + ret = eb_lookup_vmas(eb, exec, args, vm, file); if (ret) goto err; need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); if (ret) goto err; - list_for_each_entry(obj, &eb->objects, exec_list) { - int offset = obj->exec_entry - exec; - ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, - reloc + reloc_offset[offset], - vm); + list_for_each_entry(vma, &eb->vmas, exec_list) { + int offset = vma->exec_entry - exec; + ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb, + reloc + reloc_offset[offset]); if (ret) goto err; } @@ -727,14 +773,15 @@ err: static int i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, - struct list_head *objects) + struct list_head *vmas) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; uint32_t flush_domains = 0; bool flush_chipset = false; int ret; - list_for_each_entry(obj, objects, exec_list) { + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_object *obj = vma->obj; ret = i915_gem_object_sync(obj, ring); if (ret) return ret; @@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, } static void -i915_gem_execbuffer_move_to_active(struct list_head *objects, - struct i915_address_space *vm, +i915_gem_execbuffer_move_to_active(struct list_head *vmas, struct intel_ring_buffer *ring) { - struct drm_i915_gem_object *obj; + struct i915_vma *vma; - list_for_each_entry(obj, objects, exec_list) { + list_for_each_entry(vma, vmas, exec_list) { + struct drm_i915_gem_object *obj = vma->obj; u32 old_read = obj->base.read_domains; u32 old_write = obj->base.write_domain; @@ -825,8 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->base.read_domains = obj->base.pending_read_domains; obj->fenced_gpu_access = obj->pending_fenced_gpu_access; - /* FIXME: This lookup gets fixed later <-- danvet */ - list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); + list_move_tail(&vma->mm_list, &vma->vm->active_list); i915_gem_object_move_to_active(obj, ring); if (obj->base.write_domain) { obj->dirty = 1; @@ -885,10 +931,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct i915_address_space *vm) { drm_i915_private_t *dev_priv = dev->dev_private; - struct eb_objects *eb; + struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_clip_rect *cliprects = NULL; struct intel_ring_buffer *ring; + struct i915_ctx_hang_stats *hs; u32 ctx_id = i915_execbuffer2_get_context_id(*args); u32 exec_start, exec_len; u32 mask, flags; @@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto pre_mutex_err; } - eb = eb_create(args); + eb = eb_create(args, vm); if (eb == NULL) { mutex_unlock(&dev->struct_mutex); ret = -ENOMEM; @@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Look up 
object handles */ - ret = eb_lookup_objects(eb, exec, args, file); + ret = eb_lookup_vmas(eb, exec, args, vm, file); if (ret) goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = list_entry(eb->objects.prev, - struct drm_i915_gem_object, - exec_list); + batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs); + ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs); if (ret) goto err; @@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) { if (ret == -EFAULT) { ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, - eb, exec, vm); + eb, exec); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } if (ret) @@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); - ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); + ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas); if (ret) goto err; + hs = i915_gem_context_get_hang_stats(dev, file, ctx_id); + if (IS_ERR(hs)) { + ret = PTR_ERR(hs); + goto err; + } + + if (hs->banned) { + ret = -EIO; + goto err; + } + ret = i915_switch_context(ring, file, ctx_id); if (ret) goto err; @@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); - i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring); + i915_gem_execbuffer_move_to_active(&eb->vmas, ring); i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); err: diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index e15a1d90037d..d284d892ed94 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; - vma = i915_gem_vma_create(obj, ggtt); + vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err_out; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index dae364f0028c..7bea61325741 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, } } +static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) +{ + switch (a) { + case HANGCHECK_IDLE: + return "idle"; + case HANGCHECK_WAIT: + return "wait"; + case HANGCHECK_ACTIVE: + return "active"; + case HANGCHECK_KICK: + return "kick"; + case HANGCHECK_HUNG: + return "hung"; + } + + return "unknown"; +} + static void i915_ring_error_state(struct drm_i915_error_state_buf *m, struct drm_device *dev, struct drm_i915_error_state *error, @@ -255,6 +273,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]); err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]); + err_printf(m, " hangcheck: %s [%d]\n", + hangcheck_action_to_str(error->hangcheck_action[ring]), + error->hangcheck_score[ring]); 
} void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...) @@ -720,6 +741,9 @@ static void i915_record_ring_state(struct drm_device *dev, error->cpu_ring_head[ring->id] = ring->head; error->cpu_ring_tail[ring->id] = ring->tail; + + error->hangcheck_score[ring->id] = ring->hangcheck.score; + error->hangcheck_action[ring->id] = ring->hangcheck.action; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4b91228fd9bd..b356dc15adda 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -665,7 +665,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, crtc); } -static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) +static bool intel_hpd_irq_event(struct drm_device *dev, + struct drm_connector *connector) { enum drm_connector_status old_status; @@ -673,11 +674,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con old_status = connector->status; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + if (old_status == connector->status) + return false; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", connector->base.id, drm_get_connector_name(connector), - old_status, connector->status); - return (old_status != connector->status); + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); + + return true; } /* @@ -882,9 +888,10 @@ static void ivybridge_parity_work(struct work_struct *work) drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, l3_parity.error_work); u32 error_status, row, bank, subbank; - char *parity_event[5]; + char *parity_event[6]; uint32_t misccpctl; unsigned long flags; + uint8_t slice = 0; /* We must turn off DOP level clock gating to access the L3 registers. 
* In order to prevent a get/put style interface, acquire struct mutex @@ -892,55 +899,81 @@ static void ivybridge_parity_work(struct work_struct *work) */ mutex_lock(&dev_priv->dev->struct_mutex); + /* If we've screwed up tracking, just let the interrupt fire again */ + if (WARN_ON(!dev_priv->l3_parity.which_slice)) + goto out; + misccpctl = I915_READ(GEN7_MISCCPCTL); I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); POSTING_READ(GEN7_MISCCPCTL); - error_status = I915_READ(GEN7_L3CDERRST1); - row = GEN7_PARITY_ERROR_ROW(error_status); - bank = GEN7_PARITY_ERROR_BANK(error_status); - subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); + while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { + u32 reg; - I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | - GEN7_L3CDERRST1_ENABLE); - POSTING_READ(GEN7_L3CDERRST1); + slice--; + if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) + break; + + dev_priv->l3_parity.which_slice &= ~(1<<slice); + + reg = GEN7_L3CDERRST1 + (slice * 0x200); + + error_status = I915_READ(reg); + row = GEN7_PARITY_ERROR_ROW(error_status); + bank = GEN7_PARITY_ERROR_BANK(error_status); + subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); + + I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); + POSTING_READ(reg); + + parity_event[0] = I915_L3_PARITY_UEVENT "=1"; + parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); + parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); + parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); + parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); + parity_event[5] = NULL; + + kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, + KOBJ_CHANGE, parity_event); + + DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", + slice, row, bank, subbank); + + kfree(parity_event[4]); + kfree(parity_event[3]); + kfree(parity_event[2]); + kfree(parity_event[1]); + } I915_WRITE(GEN7_MISCCPCTL, misccpctl); +out: + WARN_ON(dev_priv->l3_parity.which_slice); spin_lock_irqsave(&dev_priv->irq_lock, flags); - ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); + ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); mutex_unlock(&dev_priv->dev->struct_mutex); - - parity_event[0] = I915_L3_PARITY_UEVENT "=1"; - parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); - parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); - parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); - parity_event[4] = NULL; - - kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, - KOBJ_CHANGE, parity_event); - - DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", - row, bank, subbank); - - kfree(parity_event[3]); - kfree(parity_event[2]); - kfree(parity_event[1]); } -static void ivybridge_parity_error_irq_handler(struct drm_device *dev) +static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - if (!HAS_L3_GPU_CACHE(dev)) + if (!HAS_L3_DPF(dev)) return; spin_lock(&dev_priv->irq_lock); - ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT); + ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); spin_unlock(&dev_priv->irq_lock); + iir &= GT_PARITY_ERROR(dev); + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) + dev_priv->l3_parity.which_slice |= 1 << 1; + + if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) + dev_priv->l3_parity.which_slice |= 1 << 0; + queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); } @@ -975,8 +1008,8 @@ static void snb_gt_irq_handler(struct drm_device *dev, i915_handle_error(dev, false); } - if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) - ivybridge_parity_error_irq_handler(dev); + if (gt_iir & GT_PARITY_ERROR(dev)) + ivybridge_parity_error_irq_handler(dev, gt_iir); } #define HPD_STORM_DETECT_PERIOD 1000 @@ -1388,7 +1421,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; - bool err_int_reenable = false; atomic_inc(&dev_priv->irq_received); @@ -1412,17 +1444,6 @@ static
irqreturn_t ironlake_irq_handler(int irq, void *arg) POSTING_READ(SDEIER); } - /* On Haswell, also mask ERR_INT because we don't want to risk - * generating "unclaimed register" interrupts from inside the interrupt - * handler. */ - if (IS_HASWELL(dev)) { - spin_lock(&dev_priv->irq_lock); - err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB; - if (err_int_reenable) - ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); - spin_unlock(&dev_priv->irq_lock); - } - gt_iir = I915_READ(GTIIR); if (gt_iir) { if (INTEL_INFO(dev)->gen >= 6) @@ -1452,13 +1473,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) } } - if (err_int_reenable) { - spin_lock(&dev_priv->irq_lock); - if (ivb_can_enable_err_int(dev)) - ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); - spin_unlock(&dev_priv->irq_lock); - } - I915_WRITE(DEIER, de_ier); POSTING_READ(DEIER); if (!HAS_PCH_NOP(dev)) { @@ -2021,6 +2035,8 @@ static void i915_hangcheck_elapsed(unsigned long data) if (ring->hangcheck.seqno == seqno) { if (ring_idle(ring, seqno)) { + ring->hangcheck.action = HANGCHECK_IDLE; + if (waitqueue_active(&ring->irq_queue)) { /* Issue a wake-up to catch stuck h/w. */ DRM_ERROR("Hangcheck timer elapsed... %s idle\n", @@ -2049,6 +2065,7 @@ static void i915_hangcheck_elapsed(unsigned long data) acthd); switch (ring->hangcheck.action) { + case HANGCHECK_IDLE: case HANGCHECK_WAIT: break; case HANGCHECK_ACTIVE: @@ -2064,6 +2081,8 @@ static void i915_hangcheck_elapsed(unsigned long data) } } } else { + ring->hangcheck.action = HANGCHECK_ACTIVE; + /* Gradually reduce the count so that we catch DoS * attempts across multiple batches. */ @@ -2254,10 +2273,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev) pm_irqs = gt_irqs = 0; dev_priv->gt_irq_mask = ~0; - if (HAS_L3_GPU_CACHE(dev)) { + if (HAS_L3_DPF(dev)) { /* L3 parity interrupt is always unmasked. 
*/ - dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); + gt_irqs |= GT_PARITY_ERROR(dev); } gt_irqs |= GT_RENDER_USER_INTERRUPT; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c159e1a6810f..c4f9bef6d073 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -264,6 +264,11 @@ #define MI_SEMAPHORE_SYNC_VVE (1<<16) /* VECS wait for VCS (VEVSYNC) */ #define MI_SEMAPHORE_SYNC_RVE (2<<16) /* VECS wait for RCS (VERSYNC) */ #define MI_SEMAPHORE_SYNC_INVALID (3<<16) + +#define MI_PREDICATE_RESULT_2 (0x2214) +#define LOWER_SLICE_ENABLED (1<<0) +#define LOWER_SLICE_DISABLED (0<<0) + /* * 3D instructions used by the kernel */ @@ -346,6 +351,10 @@ #define IOSF_PORT_PUNIT 0x4 #define IOSF_PORT_NC 0x11 #define IOSF_PORT_DPIO 0x12 +#define IOSF_PORT_GPIO_NC 0x13 +#define IOSF_PORT_CCK 0x14 +#define IOSF_PORT_CCU 0xA9 +#define IOSF_PORT_GPS_CORE 0x48 #define VLV_IOSF_DATA (VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR (VLV_DISPLAY_BASE + 0x2108) @@ -372,6 +381,38 @@ #define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 #define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 +/* vlv2 north clock has */ +#define CCK_REG_DSI_PLL_FUSE 0x44 +#define CCK_REG_DSI_PLL_CONTROL 0x48 +#define DSI_PLL_VCO_EN (1 << 31) +#define DSI_PLL_LDO_GATE (1 << 30) +#define DSI_PLL_P1_POST_DIV_SHIFT 17 +#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17) +#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13) +#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12) +#define DSI_PLL_MUX_MASK (3 << 9) +#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10) +#define DSI_PLL_MUX_DSI0_CCK (1 << 10) +#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9) +#define DSI_PLL_MUX_DSI1_CCK (1 << 9) +#define DSI_PLL_CLK_GATE_MASK (0xf << 5) +#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8) +#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7) +#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6) +#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5) +#define DSI_PLL_LOCK (1 << 0) +#define CCK_REG_DSI_PLL_DIVIDER 0x4c +#define DSI_PLL_LFSR (1 << 31) +#define DSI_PLL_FRACTION_EN (1 << 30) +#define DSI_PLL_FRAC_COUNTER_SHIFT 27 +#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27) +#define DSI_PLL_USYNC_CNT_SHIFT 18 +#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18) +#define DSI_PLL_N1_DIV_SHIFT 16 +#define DSI_PLL_N1_DIV_MASK (3 << 16) +#define DSI_PLL_M1_DIV_SHIFT 0 +#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) + /* * DPIO - a special bus for various display related registers to hide behind * @@ -886,6 +927,7 @@ #define GT_BLT_USER_INTERRUPT (1 << 22) #define GT_BSD_CS_ERROR_INTERRUPT (1 << 15) #define GT_BSD_USER_INTERRUPT (1 << 12) +#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */ #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */ #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4) #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3) @@ -896,6 +938,10 @@ #define PM_VEBOX_CS_ERROR_INTERRUPT (1 << 12) /* hsw+ */ #define PM_VEBOX_USER_INTERRUPT (1 << 10) /* hsw+ */ +#define GT_PARITY_ERROR(dev) \ + (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \ + (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0)) + /* These are all the "old" interrupts */ #define ILK_BSD_USER_INTERRUPT (1<<5) #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) @@ -1400,6 +1446,8 @@ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in * every way. It is not accessible from the CP register read instructions.
* + * Starting from Haswell, you can't write registers using the MCHBAR mirror, + * just read. */ #define MCHBAR_MIRROR_BASE 0x10000 @@ -2030,6 +2078,7 @@ /* Gen 4 SDVO/HDMI bits: */ #define SDVO_COLOR_FORMAT_8bpc (0 << 26) +#define SDVO_COLOR_FORMAT_MASK (7 << 26) #define SDVO_ENCODING_SDVO (0 << 10) #define SDVO_ENCODING_HDMI (2 << 10) #define HDMI_MODE_SELECT_HDMI (1 << 9) /* HDMI only */ @@ -2982,6 +3031,7 @@ #define PIPECONF_DISABLE 0 #define PIPECONF_DOUBLE_WIDE (1<<30) #define I965_PIPECONF_ACTIVE (1<<30) +#define PIPECONF_DSI_PLL_LOCKED (1<<29) /* vlv & pipe A only */ #define PIPECONF_FRAME_START_DELAY_MASK (3<<27) #define PIPECONF_SINGLE_WIDE 0 #define PIPECONF_PIPE_UNLOCKED 0 @@ -4407,6 +4457,8 @@ #define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) #define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) #define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) +#define PANEL_PORT_SELECT_DPB_VLV (1 << 30) +#define PANEL_PORT_SELECT_DPC_VLV (2 << 30) #define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) #define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) @@ -4438,7 +4490,6 @@ #define PANEL_PORT_SELECT_MASK (3 << 30) #define PANEL_PORT_SELECT_LVDS (0 << 30) #define PANEL_PORT_SELECT_DPA (1 << 30) -#define EDP_PANEL (1 << 30) #define PANEL_PORT_SELECT_DPC (2 << 30) #define PANEL_PORT_SELECT_DPD (3 << 30) #define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) @@ -4447,11 +4498,6 @@ #define PANEL_LIGHT_ON_DELAY_SHIFT 0 #define PCH_PP_OFF_DELAYS 0xc720c -#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30) -#define PANEL_POWER_PORT_LVDS (0 << 30) -#define PANEL_POWER_PORT_DP_A (1 << 30) -#define PANEL_POWER_PORT_DP_C (2 << 30) -#define PANEL_POWER_PORT_DP_D (3 << 30) #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) #define PANEL_POWER_DOWN_DELAY_SHIFT 16 #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) @@ -4685,6 +4731,8 @@ #define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 #define GEN6_PCODE_WRITE_RC6VIDS 0x4 #define GEN6_PCODE_READ_RC6VIDS 0x5 +#define GEN6_PCODE_READ_D_COMP 0x10 +#define GEN6_PCODE_WRITE_D_COMP 0x11 #define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) #define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) #define GEN6_PCODE_DATA 0x138128 @@ -4704,6 +4752,7 @@ /* IVYBRIDGE DPF */ #define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */ +#define HSW_L3CDERRST11 0xB208 /* L3CD Error Status register 1 slice 1 */ #define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14) #define GEN7_PARITY_ERROR_VALID (1<<13) #define GEN7_L3CDERRST1_BANK_MASK (3<<11) @@ -4717,6 +4766,7 @@ #define GEN7_L3CDERRST1_ENABLE (1<<7) #define GEN7_L3LOG_BASE 0xB070 +#define HSW_L3LOG_BASE_SLICE1 0xB270 #define GEN7_L3LOG_SIZE 0x80 #define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */ @@ -5116,4 +5166,414 @@ #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) +/* VLV MIPI registers */ + +#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) +#define _MIPIB_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) +#define MIPI_PORT_CTRL(pipe) _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL) +#define DPI_ENABLE (1 << 31) /* A + B */ +#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 +#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27) +#define DUAL_LINK_MODE_MASK (1 << 26) +#define DUAL_LINK_MODE_FRONT_BACK (0 << 26) +#define DUAL_LINK_MODE_PIXEL_ALTERNATIVE (1 << 26) +#define DITHERING_ENABLE (1 << 25) /* A + B */ +#define FLOPPED_HSTX (1 << 23) +#define DE_INVERT (1 << 19) /* XXX */ +#define 
MIPIA_FLISDSI_DELAY_COUNT_SHIFT 18 +#define MIPIA_FLISDSI_DELAY_COUNT_MASK (0xf << 18) +#define AFE_LATCHOUT (1 << 17) +#define LP_OUTPUT_HOLD (1 << 16) +#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT 15 +#define MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK (1 << 15) +#define MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT 11 +#define MIPIB_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 11) +#define CSB_SHIFT 9 +#define CSB_MASK (3 << 9) +#define CSB_20MHZ (0 << 9) +#define CSB_10MHZ (1 << 9) +#define CSB_40MHZ (2 << 9) +#define BANDGAP_MASK (1 << 8) +#define BANDGAP_PNW_CIRCUIT (0 << 8) +#define BANDGAP_LNC_CIRCUIT (1 << 8) +#define MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT 5 +#define MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK (7 << 5) +#define TEARING_EFFECT_DELAY (1 << 4) /* A + B */ +#define TEARING_EFFECT_SHIFT 2 /* A + B */ +#define TEARING_EFFECT_MASK (3 << 2) +#define TEARING_EFFECT_OFF (0 << 2) +#define TEARING_EFFECT_DSI (1 << 2) +#define TEARING_EFFECT_GPIO (2 << 2) +#define LANE_CONFIGURATION_SHIFT 0 +#define LANE_CONFIGURATION_MASK (3 << 0) +#define LANE_CONFIGURATION_4LANE (0 << 0) +#define LANE_CONFIGURATION_DUAL_LINK_A (1 << 0) +#define LANE_CONFIGURATION_DUAL_LINK_B (2 << 0) + +#define _MIPIA_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61194) +#define _MIPIB_TEARING_CTRL (VLV_DISPLAY_BASE + 0x61704) +#define MIPI_TEARING_CTRL(pipe) _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL) +#define TEARING_EFFECT_DELAY_SHIFT 0 +#define TEARING_EFFECT_DELAY_MASK (0xffff << 0) + +/* XXX: all bits reserved */ +#define _MIPIA_AUTOPWG (VLV_DISPLAY_BASE + 0x611a0) + +/* MIPI DSI Controller and D-PHY registers */ + +#define _MIPIA_DEVICE_READY (VLV_DISPLAY_BASE + 0xb000) +#define _MIPIB_DEVICE_READY (VLV_DISPLAY_BASE + 0xb800) +#define MIPI_DEVICE_READY(pipe) _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY) +#define BUS_POSSESSION (1 << 3) /* set to give bus to receiver */ +#define ULPS_STATE_MASK (3 << 1) +#define ULPS_STATE_ENTER (2 << 1) +#define ULPS_STATE_EXIT (1 << 1) +#define ULPS_STATE_NORMAL_OPERATION (0 << 1) +#define DEVICE_READY (1 << 0) + +#define _MIPIA_INTR_STAT (VLV_DISPLAY_BASE + 0xb004) +#define _MIPIB_INTR_STAT (VLV_DISPLAY_BASE + 0xb804) +#define MIPI_INTR_STAT(pipe) _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT) +#define _MIPIA_INTR_EN (VLV_DISPLAY_BASE + 0xb008) +#define _MIPIB_INTR_EN (VLV_DISPLAY_BASE + 0xb808) +#define MIPI_INTR_EN(pipe) _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN) +#define TEARING_EFFECT (1 << 31) +#define SPL_PKT_SENT_INTERRUPT (1 << 30) +#define GEN_READ_DATA_AVAIL (1 << 29) +#define LP_GENERIC_WR_FIFO_FULL (1 << 28) +#define HS_GENERIC_WR_FIFO_FULL (1 << 27) +#define RX_PROT_VIOLATION (1 << 26) +#define RX_INVALID_TX_LENGTH (1 << 25) +#define ACK_WITH_NO_ERROR (1 << 24) +#define TURN_AROUND_ACK_TIMEOUT (1 << 23) +#define LP_RX_TIMEOUT (1 << 22) +#define HS_TX_TIMEOUT (1 << 21) +#define DPI_FIFO_UNDERRUN (1 << 20) +#define LOW_CONTENTION (1 << 19) +#define HIGH_CONTENTION (1 << 18) +#define TXDSI_VC_ID_INVALID (1 << 17) +#define TXDSI_DATA_TYPE_NOT_RECOGNISED (1 << 16) +#define TXCHECKSUM_ERROR (1 << 15) +#define TXECC_MULTIBIT_ERROR (1 << 14) +#define TXECC_SINGLE_BIT_ERROR (1 << 13) +#define TXFALSE_CONTROL_ERROR (1 << 12) +#define RXDSI_VC_ID_INVALID (1 << 11) +#define RXDSI_DATA_TYPE_NOT_REGOGNISED (1 << 10) +#define RXCHECKSUM_ERROR (1 << 9) +#define RXECC_MULTIBIT_ERROR (1 << 8) +#define RXECC_SINGLE_BIT_ERROR (1 << 7) +#define RXFALSE_CONTROL_ERROR (1 << 6) +#define RXHS_RECEIVE_TIMEOUT_ERROR (1 << 5) +#define RX_LP_TX_SYNC_ERROR (1 << 4) +#define RXEXCAPE_MODE_ENTRY_ERROR (1 << 3) 
+#define RXEOT_SYNC_ERROR (1 << 2) +#define RXSOT_SYNC_ERROR (1 << 1) +#define RXSOT_ERROR (1 << 0) + +#define _MIPIA_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb00c) +#define _MIPIB_DSI_FUNC_PRG (VLV_DISPLAY_BASE + 0xb80c) +#define MIPI_DSI_FUNC_PRG(pipe) _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG) +#define CMD_MODE_DATA_WIDTH_MASK (7 << 13) +#define CMD_MODE_NOT_SUPPORTED (0 << 13) +#define CMD_MODE_DATA_WIDTH_16_BIT (1 << 13) +#define CMD_MODE_DATA_WIDTH_9_BIT (2 << 13) +#define CMD_MODE_DATA_WIDTH_8_BIT (3 << 13) +#define CMD_MODE_DATA_WIDTH_OPTION1 (4 << 13) +#define CMD_MODE_DATA_WIDTH_OPTION2 (5 << 13) +#define VID_MODE_FORMAT_MASK (0xf << 7) +#define VID_MODE_NOT_SUPPORTED (0 << 7) +#define VID_MODE_FORMAT_RGB565 (1 << 7) +#define VID_MODE_FORMAT_RGB666 (2 << 7) +#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7) +#define VID_MODE_FORMAT_RGB888 (4 << 7) +#define CMD_MODE_CHANNEL_NUMBER_SHIFT 5 +#define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5) +#define VID_MODE_CHANNEL_NUMBER_SHIFT 3 +#define VID_MODE_CHANNEL_NUMBER_MASK (3 << 3) +#define DATA_LANES_PRG_REG_SHIFT 0 +#define DATA_LANES_PRG_REG_MASK (7 << 0) + +#define _MIPIA_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb010) +#define _MIPIB_HS_TX_TIMEOUT (VLV_DISPLAY_BASE + 0xb810) +#define MIPI_HS_TX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT) +#define HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK 0xffffff + +#define _MIPIA_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb014) +#define _MIPIB_LP_RX_TIMEOUT (VLV_DISPLAY_BASE + 0xb814) +#define MIPI_LP_RX_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT) +#define LOW_POWER_RX_TIMEOUT_COUNTER_MASK 0xffffff + +#define _MIPIA_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb018) +#define _MIPIB_TURN_AROUND_TIMEOUT (VLV_DISPLAY_BASE + 0xb818) +#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT) +#define TURN_AROUND_TIMEOUT_MASK 0x3f + +#define _MIPIA_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb01c) +#define _MIPIB_DEVICE_RESET_TIMER (VLV_DISPLAY_BASE + 0xb81c) +#define MIPI_DEVICE_RESET_TIMER(pipe) _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER) +#define DEVICE_RESET_TIMER_MASK 0xffff + +#define _MIPIA_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb020) +#define _MIPIB_DPI_RESOLUTION (VLV_DISPLAY_BASE + 0xb820) +#define MIPI_DPI_RESOLUTION(pipe) _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION) +#define VERTICAL_ADDRESS_SHIFT 16 +#define VERTICAL_ADDRESS_MASK (0xffff << 16) +#define HORIZONTAL_ADDRESS_SHIFT 0 +#define HORIZONTAL_ADDRESS_MASK 0xffff + +#define _MIPIA_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb024) +#define _MIPIB_DBI_FIFO_THROTTLE (VLV_DISPLAY_BASE + 0xb824) +#define MIPI_DBI_FIFO_THROTTLE(pipe) _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE) +#define DBI_FIFO_EMPTY_HALF (0 << 0) +#define DBI_FIFO_EMPTY_QUARTER (1 << 0) +#define DBI_FIFO_EMPTY_7_LOCATIONS (2 << 0) + +/* regs below are bits 15:0 */ +#define _MIPIA_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb028) +#define _MIPIB_HSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb828) +#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT) + +#define _MIPIA_HBP_COUNT (VLV_DISPLAY_BASE + 0xb02c) +#define _MIPIB_HBP_COUNT (VLV_DISPLAY_BASE + 0xb82c) +#define MIPI_HBP_COUNT(pipe) _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT) + +#define _MIPIA_HFP_COUNT (VLV_DISPLAY_BASE + 0xb030) +#define _MIPIB_HFP_COUNT (VLV_DISPLAY_BASE + 0xb830) +#define MIPI_HFP_COUNT(pipe) _PIPE(pipe, _MIPIA_HFP_COUNT, 
_MIPIB_HFP_COUNT) + +#define _MIPIA_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb034) +#define _MIPIB_HACTIVE_AREA_COUNT (VLV_DISPLAY_BASE + 0xb834) +#define MIPI_HACTIVE_AREA_COUNT(pipe) _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT) + +#define _MIPIA_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb038) +#define _MIPIB_VSYNC_PADDING_COUNT (VLV_DISPLAY_BASE + 0xb838) +#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT) + +#define _MIPIA_VBP_COUNT (VLV_DISPLAY_BASE + 0xb03c) +#define _MIPIB_VBP_COUNT (VLV_DISPLAY_BASE + 0xb83c) +#define MIPI_VBP_COUNT(pipe) _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT) + +#define _MIPIA_VFP_COUNT (VLV_DISPLAY_BASE + 0xb040) +#define _MIPIB_VFP_COUNT (VLV_DISPLAY_BASE + 0xb840) +#define MIPI_VFP_COUNT(pipe) _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT) + +#define _MIPIA_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb044) +#define _MIPIB_HIGH_LOW_SWITCH_COUNT (VLV_DISPLAY_BASE + 0xb844) +#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe) _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT) +/* regs above are bits 15:0 */ + +#define _MIPIA_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb048) +#define _MIPIB_DPI_CONTROL (VLV_DISPLAY_BASE + 0xb848) +#define MIPI_DPI_CONTROL(pipe) _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL) +#define DPI_LP_MODE (1 << 6) +#define BACKLIGHT_OFF (1 << 5) +#define BACKLIGHT_ON (1 << 4) +#define COLOR_MODE_OFF (1 << 3) +#define COLOR_MODE_ON (1 << 2) +#define TURN_ON (1 << 1) +#define SHUTDOWN (1 << 0) + +#define _MIPIA_DPI_DATA (VLV_DISPLAY_BASE + 0xb04c) +#define _MIPIB_DPI_DATA (VLV_DISPLAY_BASE + 0xb84c) +#define MIPI_DPI_DATA(pipe) _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA) +#define COMMAND_BYTE_SHIFT 0 +#define COMMAND_BYTE_MASK (0x3f << 0) + +#define _MIPIA_INIT_COUNT (VLV_DISPLAY_BASE + 0xb050) +#define _MIPIB_INIT_COUNT (VLV_DISPLAY_BASE + 0xb850) +#define MIPI_INIT_COUNT(pipe) _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT) +#define MASTER_INIT_TIMER_SHIFT 0 +#define MASTER_INIT_TIMER_MASK (0xffff << 0) + +#define _MIPIA_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb054) +#define _MIPIB_MAX_RETURN_PKT_SIZE (VLV_DISPLAY_BASE + 0xb854) +#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE) +#define MAX_RETURN_PKT_SIZE_SHIFT 0 +#define MAX_RETURN_PKT_SIZE_MASK (0x3ff << 0) + +#define _MIPIA_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb058) +#define _MIPIB_VIDEO_MODE_FORMAT (VLV_DISPLAY_BASE + 0xb858) +#define MIPI_VIDEO_MODE_FORMAT(pipe) _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT) +#define RANDOM_DPI_DISPLAY_RESOLUTION (1 << 4) +#define DISABLE_VIDEO_BTA (1 << 3) +#define IP_TG_CONFIG (1 << 2) +#define VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE (1 << 0) +#define VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS (2 << 0) +#define VIDEO_MODE_BURST (3 << 0) + +#define _MIPIA_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb05c) +#define _MIPIB_EOT_DISABLE (VLV_DISPLAY_BASE + 0xb85c) +#define MIPI_EOT_DISABLE(pipe) _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE) +#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) +#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) +#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) +#define HIGH_CONTENTION_RECOVERY_DISABLE (1 << 4) +#define TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3) +#define TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE (1 << 2) +#define CLOCKSTOP (1 << 1) +#define EOT_DISABLE (1 << 0) + +#define _MIPIA_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb060) +#define 
_MIPIB_LP_BYTECLK (VLV_DISPLAY_BASE + 0xb860) +#define MIPI_LP_BYTECLK(pipe) _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK) +#define LP_BYTECLK_SHIFT 0 +#define LP_BYTECLK_MASK (0xffff << 0) + +/* bits 31:0 */ +#define _MIPIA_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb064) +#define _MIPIB_LP_GEN_DATA (VLV_DISPLAY_BASE + 0xb864) +#define MIPI_LP_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA) + +/* bits 31:0 */ +#define _MIPIA_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb068) +#define _MIPIB_HS_GEN_DATA (VLV_DISPLAY_BASE + 0xb868) +#define MIPI_HS_GEN_DATA(pipe) _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA) + +#define _MIPIA_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb06c) +#define _MIPIB_LP_GEN_CTRL (VLV_DISPLAY_BASE + 0xb86c) +#define MIPI_LP_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL) +#define _MIPIA_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb070) +#define _MIPIB_HS_GEN_CTRL (VLV_DISPLAY_BASE + 0xb870) +#define MIPI_HS_GEN_CTRL(pipe) _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL) +#define LONG_PACKET_WORD_COUNT_SHIFT 8 +#define LONG_PACKET_WORD_COUNT_MASK (0xffff << 8) +#define SHORT_PACKET_PARAM_SHIFT 8 +#define SHORT_PACKET_PARAM_MASK (0xffff << 8) +#define VIRTUAL_CHANNEL_SHIFT 6 +#define VIRTUAL_CHANNEL_MASK (3 << 6) +#define DATA_TYPE_SHIFT 0 +#define DATA_TYPE_MASK (0x3f << 0) /* data type values, see include/video/mipi_display.h */ +#define _MIPIA_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb074) +#define _MIPIB_GEN_FIFO_STAT (VLV_DISPLAY_BASE + 0xb874) +#define MIPI_GEN_FIFO_STAT(pipe) _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT) +#define DPI_FIFO_EMPTY (1 << 28) +#define DBI_FIFO_EMPTY (1 << 27) +#define LP_CTRL_FIFO_EMPTY (1 << 26) +#define LP_CTRL_FIFO_HALF_EMPTY (1 << 25) +#define LP_CTRL_FIFO_FULL (1 << 24) +#define HS_CTRL_FIFO_EMPTY (1 << 18) +#define HS_CTRL_FIFO_HALF_EMPTY (1 << 17) +#define HS_CTRL_FIFO_FULL (1 << 16) +#define LP_DATA_FIFO_EMPTY (1 << 10) +#define LP_DATA_FIFO_HALF_EMPTY (1 << 9) +#define LP_DATA_FIFO_FULL (1 << 8) +#define HS_DATA_FIFO_EMPTY (1 << 2) +#define HS_DATA_FIFO_HALF_EMPTY (1 << 1) +#define HS_DATA_FIFO_FULL (1 << 0) + +#define _MIPIA_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb078) +#define _MIPIB_HS_LS_DBI_ENABLE (VLV_DISPLAY_BASE + 0xb878) +#define MIPI_HS_LP_DBI_ENABLE(pipe) _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE) +#define DBI_HS_LP_MODE_MASK (1 << 0) +#define DBI_LP_MODE (1 << 0) +#define DBI_HS_MODE (0 << 0) + +#define _MIPIA_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb080) +#define _MIPIB_DPHY_PARAM (VLV_DISPLAY_BASE + 0xb880) +#define MIPI_DPHY_PARAM(pipe) _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM) +#define EXIT_ZERO_COUNT_SHIFT 24 +#define EXIT_ZERO_COUNT_MASK (0x3f << 24) +#define TRAIL_COUNT_SHIFT 16 +#define TRAIL_COUNT_MASK (0x1f << 16) +#define CLK_ZERO_COUNT_SHIFT 8 +#define CLK_ZERO_COUNT_MASK (0xff << 8) +#define PREPARE_COUNT_SHIFT 0 +#define PREPARE_COUNT_MASK (0x3f << 0) + +/* bits 31:0 */ +#define _MIPIA_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb084) +#define _MIPIB_DBI_BW_CTRL (VLV_DISPLAY_BASE + 0xb884) +#define MIPI_DBI_BW_CTRL(pipe) _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL) + +#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb088) +#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT (VLV_DISPLAY_BASE + 0xb888) +#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe) _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT) +#define LP_HS_SSW_CNT_SHIFT 16 +#define LP_HS_SSW_CNT_MASK (0xffff << 16) +#define HS_LP_PWR_SW_CNT_SHIFT 0 +#define HS_LP_PWR_SW_CNT_MASK
(0xffff << 0) + +#define _MIPIA_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb08c) +#define _MIPIB_STOP_STATE_STALL (VLV_DISPLAY_BASE + 0xb88c) +#define MIPI_STOP_STATE_STALL(pipe) _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL) +#define STOP_STATE_STALL_COUNTER_SHIFT 0 +#define STOP_STATE_STALL_COUNTER_MASK (0xff << 0) + +#define _MIPIA_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb090) +#define _MIPIB_INTR_STAT_REG_1 (VLV_DISPLAY_BASE + 0xb890) +#define MIPI_INTR_STAT_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1) +#define _MIPIA_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb094) +#define _MIPIB_INTR_EN_REG_1 (VLV_DISPLAY_BASE + 0xb894) +#define MIPI_INTR_EN_REG_1(pipe) _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1) +#define RX_CONTENTION_DETECTED (1 << 0) + +/* XXX: only pipe A ?!? */ +#define MIPIA_DBI_TYPEC_CTRL (VLV_DISPLAY_BASE + 0xb100) +#define DBI_TYPEC_ENABLE (1 << 31) +#define DBI_TYPEC_WIP (1 << 30) +#define DBI_TYPEC_OPTION_SHIFT 28 +#define DBI_TYPEC_OPTION_MASK (3 << 28) +#define DBI_TYPEC_FREQ_SHIFT 24 +#define DBI_TYPEC_FREQ_MASK (0xf << 24) +#define DBI_TYPEC_OVERRIDE (1 << 8) +#define DBI_TYPEC_OVERRIDE_COUNTER_SHIFT 0 +#define DBI_TYPEC_OVERRIDE_COUNTER_MASK (0xff << 0) + + +/* MIPI adapter registers */ + +#define _MIPIA_CTRL (VLV_DISPLAY_BASE + 0xb104) +#define _MIPIB_CTRL (VLV_DISPLAY_BASE + 0xb904) +#define MIPI_CTRL(pipe) _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL) +#define ESCAPE_CLOCK_DIVIDER_SHIFT 5 /* A only */ +#define ESCAPE_CLOCK_DIVIDER_MASK (3 << 5) +#define ESCAPE_CLOCK_DIVIDER_1 (0 << 5) +#define ESCAPE_CLOCK_DIVIDER_2 (1 << 5) +#define ESCAPE_CLOCK_DIVIDER_4 (2 << 5) +#define READ_REQUEST_PRIORITY_SHIFT 3 +#define READ_REQUEST_PRIORITY_MASK (3 << 3) +#define READ_REQUEST_PRIORITY_LOW (0 << 3) +#define READ_REQUEST_PRIORITY_HIGH (3 << 3) +#define RGB_FLIP_TO_BGR (1 << 2) + +#define _MIPIA_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb108) +#define _MIPIB_DATA_ADDRESS (VLV_DISPLAY_BASE + 0xb908) +#define MIPI_DATA_ADDRESS(pipe) _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS) +#define DATA_MEM_ADDRESS_SHIFT 5 +#define DATA_MEM_ADDRESS_MASK (0x7ffffff << 5) +#define DATA_VALID (1 << 0) + +#define _MIPIA_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb10c) +#define _MIPIB_DATA_LENGTH (VLV_DISPLAY_BASE + 0xb90c) +#define MIPI_DATA_LENGTH(pipe) _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH) +#define DATA_LENGTH_SHIFT 0 +#define DATA_LENGTH_MASK (0xfffff << 0) + +#define _MIPIA_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb110) +#define _MIPIB_COMMAND_ADDRESS (VLV_DISPLAY_BASE + 0xb910) +#define MIPI_COMMAND_ADDRESS(pipe) _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS) +#define COMMAND_MEM_ADDRESS_SHIFT 5 +#define COMMAND_MEM_ADDRESS_MASK (0x7ffffff << 5) +#define AUTO_PWG_ENABLE (1 << 2) +#define MEMORY_WRITE_DATA_FROM_PIPE_RENDERING (1 << 1) +#define COMMAND_VALID (1 << 0) + +#define _MIPIA_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb114) +#define _MIPIB_COMMAND_LENGTH (VLV_DISPLAY_BASE + 0xb914) +#define MIPI_COMMAND_LENGTH(pipe) _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH) +#define COMMAND_LENGTH_SHIFT(n) (8 * (n)) /* n: 0...3 */ +#define COMMAND_LENGTH_MASK(n) (0xff << (8 * (n))) + +#define _MIPIA_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb118) +#define _MIPIB_READ_DATA_RETURN0 (VLV_DISPLAY_BASE + 0xb918) +#define MIPI_READ_DATA_RETURN(pipe, n) \ + (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */ + +#define _MIPIA_READ_DATA_VALID (VLV_DISPLAY_BASE + 0xb138) +#define _MIPIB_READ_DATA_VALID 
(VLV_DISPLAY_BASE + 0xb938) +#define MIPI_READ_DATA_VALID(pipe) _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID) +#define READ_DATA_VALID(n) (1 << (n)) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 70db618989c4..3538370e3a47 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB); + if (INTEL_INFO(dev)->gen <= 4) + pci_read_config_byte(dev->pdev, LBB, + &dev_priv->regfile.saveLBB); mutex_lock(&dev->struct_mutex); @@ -390,7 +392,9 @@ int i915_restore_state(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB); + if (INTEL_INFO(dev)->gen <= 4) + pci_write_config_byte(dev->pdev, LBB, + dev_priv->regfile.saveLBB); mutex_lock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index c8c4112de110..44f4c1a6f7b1 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -65,6 +65,8 @@ show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); + if (IS_VALLEYVIEW(dminor->dev)) + rc6p_residency = 0; return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); } @@ -73,6 +75,8 @@ show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev); u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); + if (IS_VALLEYVIEW(dminor->dev)) + rc6pp_residency = 0; return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); } @@ -97,7 +101,7 @@ static struct attribute_group rc6_attr_group = { static int l3_access_valid(struct drm_device *dev, loff_t offset) { - if (!HAS_L3_GPU_CACHE(dev)) + if (!HAS_L3_DPF(dev)) return -EPERM; if (offset % 4 != 0) @@ -118,28 +122,31 @@ i915_l3_read(struct file *filp, struct kobject *kobj, struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; - uint32_t misccpctl; - int i, ret; + int slice = (int)(uintptr_t)attr->private; + int ret; + + count = round_down(count, 4); ret = l3_access_valid(drm_dev, offset); if (ret) return ret; + count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count); + ret = i915_mutex_lock_interruptible(drm_dev); if (ret) return ret; - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - - for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4) - *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i); - - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + if (dev_priv->l3_parity.remap_info[slice]) + memcpy(buf, + dev_priv->l3_parity.remap_info[slice] + (offset/4), + count); + else + memset(buf, 0, count); mutex_unlock(&drm_dev->struct_mutex); - return i - offset; + return count; } static ssize_t @@ -151,18 +158,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj, struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev); struct drm_device *drm_dev = dminor->dev; struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct 
i915_hw_context *ctx; u32 *temp = NULL; /* Just here to make handling failures easy */ + int slice = (int)(uintptr_t)attr->private; int ret; ret = l3_access_valid(drm_dev, offset); if (ret) return ret; + if (dev_priv->hw_contexts_disabled) + return -ENXIO; + ret = i915_mutex_lock_interruptible(drm_dev); if (ret) return ret; - if (!dev_priv->l3_parity.remap_info) { + if (!dev_priv->l3_parity.remap_info[slice]) { temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); if (!temp) { mutex_unlock(&drm_dev->struct_mutex); @@ -182,13 +194,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj, * at this point it is left as a TODO. */ if (temp) - dev_priv->l3_parity.remap_info = temp; + dev_priv->l3_parity.remap_info[slice] = temp; - memcpy(dev_priv->l3_parity.remap_info + (offset/4), - buf + (offset/4), - count); + memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count); - i915_gem_l3_remap(drm_dev); + /* NB: We defer the remapping until we switch to the context */ + list_for_each_entry(ctx, &dev_priv->context_list, link) + ctx->remap_slice |= (1<<slice); mutex_unlock(&drm_dev->struct_mutex); @@ -200,7 +212,17 @@ static struct bin_attribute dpf_attrs = { .size = GEN7_L3LOG_SIZE, .read = i915_l3_read, .write = i915_l3_write, - .mmap = NULL + .mmap = NULL, + .private = (void *)0 }; + +static struct bin_attribute dpf_attrs_1 = { + .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)}, + .size = GEN7_L3LOG_SIZE, + .read = i915_l3_read, + .write = i915_l3_write, + .mmap = NULL, + .private = (void *)1 }; static ssize_t gt_cur_freq_mhz_show(struct device *kdev, @@ -507,10 +529,17 @@ void i915_setup_sysfs(struct drm_device *dev) DRM_ERROR("RC6 residency sysfs setup failed\n"); } #endif - if (HAS_L3_GPU_CACHE(dev)) { + if (HAS_L3_DPF(dev)) { ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs); if (ret) DRM_ERROR("l3 parity sysfs setup failed\n"); + + if (NUM_L3_SLICES(dev) > 1) { + ret = device_create_bin_file(&dev->primary->kdev, + &dpf_attrs_1); + if (ret) + DRM_ERROR("l3 parity slice 1 setup failed\n"); + } } ret = 0; @@ -534,6 +563,7 @@ void i915_teardown_sysfs(struct drm_device *dev) sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); else sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); + device_remove_bin_file(&dev->primary->kdev, &dpf_attrs_1); device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); #ifdef CONFIG_PM sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 53f2bed8bc5f..6668873fb3a8 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -568,6 +568,21 @@ } } +static void +parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb) +{ + struct bdb_mipi *mipi; + + mipi = find_section(bdb, BDB_MIPI); + if (!mipi) { + DRM_DEBUG_KMS("No MIPI BDB found"); + return; + } + + /* XXX: add more info */ + dev_priv->vbt.dsi.panel_id = mipi->panel_id; +} + static void parse_device_mapping(struct drm_i915_private *dev_priv, struct bdb_header *bdb) @@ -745,6 +760,7 @@ intel_parse_bios(struct drm_device *dev) parse_device_mapping(dev_priv, bdb); parse_driver_features(dev_priv, bdb); parse_edp(dev_priv, bdb); + parse_mipi(dev_priv, bdb); if (bios) pci_unmap_rom(pdev, bios); diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index e088d6f0956a..6e9250eb9c2c 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@
-104,6 +104,7 @@ struct vbios_data { #define BDB_LVDS_LFP_DATA 42 #define BDB_LVDS_BACKLIGHT 43 #define BDB_LVDS_POWER 44 +#define BDB_MIPI 50 #define BDB_SKIP 254 /* VBIOS private block, ignore */ struct bdb_general_features { @@ -618,4 +619,44 @@ int intel_parse_bios(struct drm_device *dev); #define PORT_IDPC 8 #define PORT_IDPD 9 +/* MIPI DSI panel info */ +struct bdb_mipi { + u16 panel_id; + u16 bridge_revision; + + /* General params */ + u32 dithering:1; + u32 bpp_pixel_format:1; + u32 rsvd1:1; + u32 dphy_valid:1; + u32 resvd2:28; + + u16 port_info; + u16 rsvd3:2; + u16 num_lanes:2; + u16 rsvd4:12; + + /* DSI config */ + u16 virt_ch_num:2; + u16 vtm:2; + u16 rsvd5:12; + + u32 dsi_clock; + u32 bridge_ref_clk; + u16 rsvd_pwr; + + /* Dphy Params */ + u32 prepare_cnt:5; + u32 rsvd6:3; + u32 clk_zero_cnt:8; + u32 trail_cnt:5; + u32 rsvd7:3; + u32 exit_zero_cnt:6; + u32 rsvd8:2; + + u32 hl_switch_cnt; + u32 lp_byte_clk; + u32 clk_lane_switch_cnt; +} __attribute__((packed)); + #endif /* _I830_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ea9022ef15d5..6f101d5620e4 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -89,6 +89,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_crt *crt = intel_encoder_to_crt(encoder); u32 tmp, flags = 0; + int dotclock; tmp = I915_READ(crt->adpa_reg); @@ -103,6 +104,13 @@ static void intel_crt_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->adjusted_mode.flags |= flags; + + dotclock = pipe_config->port_clock; + + if (HAS_PCH_SPLIT(dev_priv->dev)) + ironlake_check_encoder_dotclock(pipe_config, dotclock); + + pipe_config->adjusted_mode.clock = dotclock; } /* Note: The caller is required to filter out dpms modes not supported by the @@ -349,9 +357,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); - /* FIXME: debug force function and remove */ - ret = true; - return ret; } diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 63de2701b974..04f68804834d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -58,7 +58,7 @@ static const u32 hsw_ddi_translations_fdi[] = { 0x00FFFFFF, 0x00040006 /* HDMI parameters */ }; -static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) +enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; int type = intel_encoder->type; @@ -767,9 +767,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc) BUG(); } - if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC) temp |= TRANS_DDI_PVSYNC; - if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DDI_PHSYNC; if (cpu_transcoder == TRANSCODER_EDP) { @@ -1268,6 +1268,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->adjusted_mode.flags |= flags; + + switch (temp & TRANS_DDI_BPC_MASK) { + case TRANS_DDI_BPC_6: + pipe_config->pipe_bpp = 18; + break; + case TRANS_DDI_BPC_8: + pipe_config->pipe_bpp = 24; + break; + case TRANS_DDI_BPC_10: + pipe_config->pipe_bpp = 30; + break; + case TRANS_DDI_BPC_12: + 
pipe_config->pipe_bpp = 36; + break; + default: + break; + } + + switch (temp & TRANS_DDI_MODE_SELECT_MASK) { + case TRANS_DDI_MODE_SELECT_HDMI: + case TRANS_DDI_MODE_SELECT_DVI: + case TRANS_DDI_MODE_SELECT_FDI: + break; + case TRANS_DDI_MODE_SELECT_DP_SST: + case TRANS_DDI_MODE_SELECT_DP_MST: + pipe_config->has_dp_encoder = true; + intel_dp_get_m_n(intel_crtc, pipe_config); + break; + default: + break; + } } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e5822e79f912..bef786461a3f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -47,8 +47,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config); -static void ironlake_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_config *pipe_config); +static void ironlake_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *old_fb); @@ -69,9 +69,6 @@ struct intel_limit { intel_p2_t p2; }; -/* FDI */ -#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ - int intel_pch_rawclk(struct drm_device *dev) { @@ -339,19 +336,6 @@ static const intel_limit_t intel_limits_vlv_hdmi = { .p2_slow = 2, .p2_fast = 20 }, }; -static const intel_limit_t intel_limits_vlv_dp = { - .dot = { .min = 25000, .max = 270000 }, - .vco = { .min = 4000000, .max = 6000000 }, - .n = { .min = 1, .max = 7 }, - .m = { .min = 22, .max = 450 }, - .m1 = { .min = 2, .max = 3 }, - .m2 = { .min = 11, .max = 156 }, - .p = { .min = 10, .max = 30 }, - .p1 = { .min = 1, .max = 3 }, - .p2 = { .dot_limit = 270000, - .p2_slow = 2, .p2_fast = 20 }, -}; - static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, int refclk) { @@ -414,10 +398,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) } else if (IS_VALLEYVIEW(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) limit = &intel_limits_vlv_dac; - else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) - limit = &intel_limits_vlv_hdmi; else - limit = &intel_limits_vlv_dp; + limit = &intel_limits_vlv_hdmi; } else if (!IS_GEN2(dev)) { if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; @@ -751,6 +733,23 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, return true; } +bool intel_crtc_active(struct drm_crtc *crtc) +{ + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* Be paranoid as we can arrive here with only partial + * state retrieved from the hardware during setup. + * + * We can ditch the adjusted_mode.clock check as soon + * as Haswell has gained clock readout/fastboot support. + * + * We can ditch the crtc->fb check as soon as we can + * properly reconstruct framebuffers. 
+ */ + return intel_crtc->active && crtc->fb && + intel_crtc->config.adjusted_mode.clock; +} + enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -929,6 +928,24 @@ void assert_pll(struct drm_i915_private *dev_priv, state_string(state), state_string(cur_state)); } +/* XXX: the dsi pll is shared between MIPI DSI ports */ +static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) +{ + u32 val; + bool cur_state; + + mutex_lock(&dev_priv->dpio_lock); + val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); + mutex_unlock(&dev_priv->dpio_lock); + + cur_state = val & DSI_PLL_VCO_EN; + WARN(cur_state != state, + "DSI PLL state assertion failure (expected %s, current %s)\n", + state_string(state), state_string(cur_state)); +} +#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) +#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) + struct intel_shared_dpll * intel_crtc_to_shared_dpll(struct intel_crtc *crtc) { @@ -1069,6 +1086,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv, pipe_name(pipe)); } +static void assert_cursor(struct drm_i915_private *dev_priv, + enum pipe pipe, bool state) +{ + struct drm_device *dev = dev_priv->dev; + bool cur_state; + + if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; + else if (IS_845G(dev) || IS_I865G(dev)) + cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; + else + cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + + WARN(cur_state != state, + "cursor on pipe %c assertion failure (expected %s, current %s)\n", + pipe_name(pipe), state_string(state), state_string(cur_state)); +} +#define assert_cursor_enabled(d, p) assert_cursor(d, p, true) +#define assert_cursor_disabled(d, p) assert_cursor(d, p, false) + void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { @@ -1661,7 +1698,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) * returning. */ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, - bool pch_port) + bool pch_port, bool dsi) { enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); @@ -1670,6 +1707,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, u32 val; assert_planes_disabled(dev_priv, pipe); + assert_cursor_disabled(dev_priv, pipe); assert_sprites_disabled(dev_priv, pipe); if (HAS_PCH_LPT(dev_priv->dev)) @@ -1683,7 +1721,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, * need the check. */ if (!HAS_PCH_SPLIT(dev_priv->dev)) - assert_pll_enabled(dev_priv, pipe); + if (dsi) + assert_dsi_pll_enabled(dev_priv); + else + assert_pll_enabled(dev_priv, pipe); else { if (pch_port) { /* if driving the PCH, we need FDI enabled */ @@ -1728,6 +1769,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv, * or we might hang the display. 
*/ assert_planes_disabled(dev_priv, pipe); + assert_cursor_disabled(dev_priv, pipe); assert_sprites_disabled(dev_priv, pipe); /* Don't disable pipe A or pipe A PLLs if needed */ @@ -2872,6 +2914,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + int clock = to_intel_crtc(crtc)->config.adjusted_mode.clock; u32 divsel, phaseinc, auxdiv, phasedir = 0; u32 temp; @@ -2889,13 +2932,13 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) SBI_ICLK); /* 20MHz is a corner case which is out of range for the 7-bit divisor */ - if (crtc->mode.clock == 20000) { + if (clock == 20000) { auxdiv = 1; divsel = 0x41; phaseinc = 0x20; } else { /* The iCLK virtual clock root frequency is in MHz, - * but the crtc->mode.clock in in KHz. To get the divisors, + * but the adjusted_mode->clock is in KHz. To get the divisors, * it is necessary to divide one by another, so we * convert the virtual clock precision to KHz here for higher * precision. @@ -2904,7 +2947,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) u32 iclk_pi_range = 64; u32 desired_divisor, msb_divisor_value, pi_value; - desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock); + desired_divisor = (iclk_virtual_root_freq / clock); msb_divisor_value = desired_divisor / iclk_pi_range; pi_value = desired_divisor % iclk_pi_range; @@ -2920,7 +2963,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) ~SBI_SSCDIVINTPHASE_INCVAL_MASK); DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", - crtc->mode.clock, + clock, auxdiv, divsel, phasedir, @@ -3259,8 +3302,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); intel_set_pch_fifo_underrun_reporting(dev, pipe, true); - intel_update_watermarks(dev); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); @@ -3283,8 +3324,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) */ intel_crtc_load_lut(crtc); + intel_update_watermarks(crtc); intel_enable_pipe(dev_priv, pipe, - intel_crtc->config.has_pch_encoder); + intel_crtc->config.has_pch_encoder, false); intel_enable_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3344,6 +3386,7 @@ static void hsw_disable_ips(struct intel_crtc *crtc) assert_plane_enabled(dev_priv, crtc->plane); I915_WRITE(IPS_CTL, 0); + POSTING_READ(IPS_CTL); /* We need to wait for a vblank before we can disable the plane.
*/ intel_wait_for_vblank(dev, crtc->pipe); @@ -3369,8 +3412,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config.has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); - intel_update_watermarks(dev); - if (intel_crtc->config.has_pch_encoder) dev_priv->display.fdi_link_train(crtc); @@ -3391,8 +3432,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_ddi_set_pipe_settings(crtc); intel_ddi_enable_transcoder_func(crtc); + intel_update_watermarks(crtc); intel_enable_pipe(dev_priv, pipe, - intel_crtc->config.has_pch_encoder); + intel_crtc->config.has_pch_encoder, false); intel_enable_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3406,8 +3448,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); - for_each_encoder_on_crtc(dev, crtc, encoder) + for_each_encoder_on_crtc(dev, crtc, encoder) { encoder->enable(encoder); + intel_opregion_notify_encoder(encoder, true); + } /* * There seems to be a race in PCH platform hw (at least on some @@ -3501,7 +3545,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) } intel_crtc->active = false; - intel_update_watermarks(dev); + intel_update_watermarks(crtc); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); @@ -3521,8 +3565,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) if (!intel_crtc->active) return; - for_each_encoder_on_crtc(dev, crtc, encoder) + for_each_encoder_on_crtc(dev, crtc, encoder) { + intel_opregion_notify_encoder(encoder, false); encoder->disable(encoder); + } intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); @@ -3558,7 +3604,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) } intel_crtc->active = false; - intel_update_watermarks(dev); + intel_update_watermarks(crtc); mutex_lock(&dev->struct_mutex); intel_update_fbc(dev); @@ -3650,6 +3696,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; + bool is_dsi; WARN_ON(!crtc->enabled); @@ -3657,13 +3704,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) return; intel_crtc->active = true; - intel_update_watermarks(dev); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); - vlv_enable_pll(intel_crtc); + is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); + + if (!is_dsi) + vlv_enable_pll(intel_crtc); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -3673,7 +3722,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); - intel_enable_pipe(dev_priv, pipe, false); + intel_update_watermarks(crtc); + intel_enable_pipe(dev_priv, pipe, false, is_dsi); intel_enable_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); intel_crtc_update_cursor(crtc, true); @@ -3699,7 +3749,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) return; intel_crtc->active = true; - intel_update_watermarks(dev); for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) @@ -3711,7 +3760,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc_load_lut(crtc); - intel_enable_pipe(dev_priv, pipe, false); + intel_update_watermarks(crtc); + intel_enable_pipe(dev_priv, pipe, false, false); intel_enable_plane(dev_priv, plane, pipe); intel_enable_planes(crtc); /* The fixup needs to happen before cursor is enabled */ @@ -3778,11 +3828,13 @@ static 
void i9xx_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - i9xx_disable_pll(dev_priv, pipe); + if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) + i9xx_disable_pll(dev_priv, pipe); intel_crtc->active = false; + intel_update_watermarks(crtc); + intel_update_fbc(dev); - intel_update_watermarks(dev); } static void i9xx_crtc_off(struct drm_crtc *crtc) @@ -3856,6 +3908,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc) dev_priv->display.off(crtc); assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane); + assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe); assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe); if (crtc->fb) { @@ -4050,7 +4103,6 @@ retry: link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; fdi_dotclock = adjusted_mode->clock; - fdi_dotclock /= pipe_config->pixel_multiplier; lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, pipe_config->pipe_bpp); @@ -4092,13 +4144,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; - if (HAS_PCH_SPLIT(dev)) { - /* FDI link clock is fixed at 2.7G */ - if (pipe_config->requested_mode.clock * 3 - > IRONLAKE_FDI_FREQ * 4) + /* FIXME should check pixel clock limits on all platforms */ + if (INTEL_INFO(dev)->gen < 4) { + struct drm_i915_private *dev_priv = dev->dev_private; + int clock_limit = + dev_priv->display.get_display_clock_speed(dev); + + /* + * Enable pixel doubling when the dot clock + * is > 90% of the (display) core speed. + * + * GDG double wide on either pipe, + * otherwise pipe A only. + */ + if ((crtc->pipe == PIPE_A || IS_I915G(dev)) && + adjusted_mode->clock > clock_limit * 9 / 10) { + clock_limit *= 2; + pipe_config->double_wide = true; + } + + if (adjusted_mode->clock > clock_limit * 9 / 10) return -EINVAL; } + /* + * Pipe horizontal size must be even in: + * - DVO ganged mode + * - LVDS dual channel mode + * - Double wide pipe + */ + if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && + intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) + pipe_config->pipe_src_w &= ~1; + /* Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
*/ @@ -4262,28 +4340,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static int vlv_get_refclk(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int refclk = 27000; /* for DP & HDMI */ - - return 100000; /* only one validated so far */ - - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { - refclk = 96000; - } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { - if (intel_panel_use_ssc(dev_priv)) - refclk = 100000; - else - refclk = 96000; - } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { - refclk = 100000; - } - - return refclk; -} - static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) { struct drm_device *dev = crtc->dev; @@ -4291,7 +4347,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) int refclk; if (IS_VALLEYVIEW(dev)) { - refclk = vlv_get_refclk(crtc); + refclk = 100000; } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->vbt.lvds_ssc_freq * 1000; @@ -4349,7 +4405,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, } } -static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv) +static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe + pipe) { u32 reg_val; @@ -4357,24 +4414,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv) * PLLB opamp always calibrates to max value of 0x3f, force enable it * and set it to a reasonable value instead. */ - reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); + reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1)); reg_val &= 0xffffff00; reg_val |= 0x00000030; - vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); + vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val); - reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); + reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION); reg_val &= 0x8cffffff; reg_val |= 0x8c000000; - vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); + vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val); - reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1)); + reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1)); reg_val &= 0xffffff00; - vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val); + vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val); - reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION); + reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION); reg_val &= 0x00ffffff; reg_val |= 0xb0000000; - vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val); + vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val); } static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, @@ -4440,18 +4497,18 @@ static void vlv_update_pll(struct intel_crtc *crtc) /* PLL B needs special handling */ if (pipe) - vlv_pllb_recal_opamp(dev_priv); + vlv_pllb_recal_opamp(dev_priv, pipe); /* Set up Tx target for periodic Rcomp update */ - vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f); + vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f); /* Disable target IRef on PLL */ - reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe)); + reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe)); reg_val &= 0x00ffffff; - vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val); + vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
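/*
 * [Editorial aside, not part of the patch] The recurring change in these
 * hunks is the DPIO accessor signature: every vlv_dpio_read/write now also
 * names the pipe, so the access is steered to that pipe's PHY block, e.g.
 *
 *   old: vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 *   new: vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 */
/* Disable fast lock */ - vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610); + vlv_dpio_write(dev_priv, pipe,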
DPIO_FASTCLK_DISABLE, 0x610); /* Set idtafcrecal before PLL is enabled */ mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); @@ -4465,48 +4522,48 @@ static void vlv_update_pll(struct intel_crtc *crtc) * Note: don't use the DAC post divider as it seems unstable. */ mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); - vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); + vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv); mdiv |= DPIO_ENABLE_CALIBRATION; - vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); + vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv); /* Set HBR and RBR LPF coefficients */ if (crtc->config.port_clock == 162000 || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) - vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe), 0x009f0003); else - vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe), 0x00d0000f); if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) { /* Use SSC source */ if (!pipe) - vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), 0x0df40000); else - vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), 0x0df70000); } else { /* HDMI or VGA */ /* Use bend source */ if (!pipe) - vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), 0x0df70000); else - vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe), + vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe), 0x0df40000); } - coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe)); + coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe)); coreclk = (coreclk & 0x0000ff00) | 0x01c00000; if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) coreclk |= 0x01000000; - vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk); + vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk); - vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000); + vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000); /* Enable DPIO clock input */ dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | @@ -4651,7 +4708,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; struct drm_display_mode *adjusted_mode = &intel_crtc->config.adjusted_mode; - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; /* We need to be careful not to change the adjusted mode, for otherwise @@ -4704,7 +4760,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) * always be the user's requested size. */
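/*
 * [Editorial aside, not part of the patch] PIPESRC packs (width - 1) into
 * the high word and (height - 1) into the low word, so a 1920x1080 source
 * size is written as (1919 << 16) | 1079 = 0x077f0437.
 */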
I915_WRITE(PIPESRC(pipe), - ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); + ((intel_crtc->config.pipe_src_w - 1) << 16) | + (intel_crtc->config.pipe_src_h - 1)); } static void intel_get_pipe_timings(struct intel_crtc *crtc, @@ -4742,8 +4799,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, } tmp = I915_READ(PIPESRC(crtc->pipe)); - pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1; - pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1; + pipe_config->pipe_src_h = (tmp & 0xffff) + 1; + pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; + + pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h; + pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w; } static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc, @@ -4779,17 +4839,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE) pipeconf |= PIPECONF_ENABLE; - if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { - /* Enable pixel doubling when the dot clock is > 90% of the (display) - * core speed. - * - * XXX: No double-wide on 915GM pipe B. Is that the only reason for the - * pipe == 0 check? - */ - if (intel_crtc->config.requested_mode.clock > - dev_priv->display.get_display_clock_speed(dev) * 9 / 10) - pipeconf |= PIPECONF_DOUBLE_WIDE; - } + if (intel_crtc->config.double_wide) + pipeconf |= PIPECONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { @@ -4843,14 +4894,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; int refclk, num_connectors = 0; intel_clock_t clock, reduced_clock; u32 dspcntr; bool ok, has_reduced_clock = false; - bool is_lvds = false; + bool is_lvds = false, is_dsi = false; struct intel_encoder *encoder; const intel_limit_t *limit; int ret; @@ -4860,6 +4910,9 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, case INTEL_OUTPUT_LVDS: is_lvds = true; break; + case INTEL_OUTPUT_DSI: + is_dsi = true; + break; } num_connectors++; @@ -4867,18 +4920,21 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, refclk = i9xx_get_refclk(crtc, num_connectors); - /* - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. The returned values represent the clock equation: - * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. - */ - limit = intel_limit(crtc, refclk); - ok = dev_priv->display.find_dpll(limit, crtc, - intel_crtc->config.port_clock, - refclk, NULL, &clock); - if (!ok && !intel_crtc->config.clock_set) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; + if (!is_dsi && !intel_crtc->config.clock_set) { + /* + * Returns a set of divisors for the desired target clock with + * the given refclk, or FALSE. The returned values represent + * the clock equation: refclk * (5 * (m1 + 2) + (m2 + + * 2)) / (n + 2) / p1 / p2. + */
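/*
 * [Editorial aside, not part of the patch] A worked instance of the
 * equation above, using the gen4 DP divisors added further down in this
 * patch (gen4_dpll, DP_LINK_BW_1_62: n = 2, m1 = 23, m2 = 8, p1 = 2,
 * p2 = 10) and the 96 MHz gen4 reference clock:
 *
 *   m   = 5 * (m1 + 2) + (m2 + 2) = 5 * 25 + 10     = 135
 *   vco = refclk * m / (n + 2)    = 96000 * 135 / 4 = 3240000 kHz
 *   dot = vco / (p1 * p2)         = 3240000 / 20    = 162000 kHz
 *
 * which is exactly the 162 MHz DP 1.62 link rate. As a minimal sketch
 * (helper name is hypothetical, not from the driver):
 *
 *   static int i9xx_dpll_dot_khz(int refclk, int n, int m1, int m2,
 *                                int p1, int p2)
 *   {
 *           int m = 5 * (m1 + 2) + (m2 + 2);
 *
 *           return refclk * m / (n + 2) / (p1 * p2);
 *   }
 */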
+ limit = intel_limit(crtc, refclk); + ok = dev_priv->display.find_dpll(limit, crtc, + intel_crtc->config.port_clock, + refclk, NULL, &clock); + if (!ok && !intel_crtc->config.clock_set) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } } if (is_lvds && dev_priv->lvds_downclock_avail) { @@ -4888,6 +4944,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. */ + limit = intel_limit(crtc, refclk); has_reduced_clock = dev_priv->display.find_dpll(limit, crtc, dev_priv->lvds_downclock, @@ -4903,16 +4960,18 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, intel_crtc->config.dpll.p2 = clock.p2; } - if (IS_GEN2(dev)) + if (IS_GEN2(dev)) { i8xx_update_pll(intel_crtc, has_reduced_clock ? &reduced_clock : NULL, num_connectors); - else if (IS_VALLEYVIEW(dev)) - vlv_update_pll(intel_crtc); - else + } else if (IS_VALLEYVIEW(dev)) { + if (!is_dsi) + vlv_update_pll(intel_crtc); + } else { i9xx_update_pll(intel_crtc, has_reduced_clock ? &reduced_clock : NULL, num_connectors); + } /* Set up the display plane register */ dspcntr = DISPPLANE_GAMMA_ENABLE; @@ -4930,8 +4989,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, * which should always be the user's requested size. */ I915_WRITE(DSPSIZE(plane), - ((mode->vdisplay - 1) << 16) | - (mode->hdisplay - 1)); + ((intel_crtc->config.pipe_src_h - 1) << 16) | + (intel_crtc->config.pipe_src_w - 1)); I915_WRITE(DSPPOS(plane), 0); i9xx_set_pipeconf(intel_crtc); @@ -4941,8 +5000,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, ret = intel_pipe_set_base(crtc, x, y, fb); - intel_update_watermarks(dev); - return ret; } @@ -4987,6 +5044,25 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; + if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { + switch (tmp & PIPECONF_BPC_MASK) { + case PIPECONF_6BPC: + pipe_config->pipe_bpp = 18; + break; + case PIPECONF_8BPC: + pipe_config->pipe_bpp = 24; + break; + case PIPECONF_10BPC: + pipe_config->pipe_bpp = 30; + break; + default: + break; + } + } + + if (INTEL_INFO(dev)->gen < 4) + pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; + intel_get_pipe_timings(crtc, pipe_config); i9xx_get_pfit_config(crtc, pipe_config); @@ -5019,6 +5095,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, DPLL_PORTB_READY_MASK); } + i9xx_crtc_clock_get(crtc, pipe_config); + return true; } @@ -5826,25 +5904,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, ret = intel_pipe_set_base(crtc, x, y, fb); - intel_update_watermarks(dev); - return ret; } +static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = crtc->pipe; + + m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); + m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); + m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) + & ~TU_SIZE_MASK; + m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); + m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) + & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; +} + +static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = crtc->pipe; + + if (INTEL_INFO(dev)->gen >= 5) { + m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); +
m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); + m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) + & ~TU_SIZE_MASK; + m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); + m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) + & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + } else { + m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); + m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); + m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) + & ~TU_SIZE_MASK; + m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); + m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) + & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + } +} + +void intel_dp_get_m_n(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + if (crtc->config.has_pch_encoder) + intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); + else + intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, + &pipe_config->dp_m_n); +} + static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum transcoder transcoder = pipe_config->cpu_transcoder; - - pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder)); - pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder)); - pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) - & ~TU_SIZE_MASK; - pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); - pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, + &pipe_config->fdi_m_n); } static void ironlake_get_pfit_config(struct intel_crtc *crtc, @@ -5885,6 +6005,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) return false; + switch (tmp & PIPECONF_BPC_MASK) { + case PIPECONF_6BPC: + pipe_config->pipe_bpp = 18; + break; + case PIPECONF_8BPC: + pipe_config->pipe_bpp = 24; + break; + case PIPECONF_10BPC: + pipe_config->pipe_bpp = 30; + break; + case PIPECONF_12BPC: + pipe_config->pipe_bpp = 36; + break; + default: + break; + } + if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; @@ -5916,6 +6053,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + + ironlake_pch_clock_get(crtc, pipe_config); } else { pipe_config->pixel_multiplier = 1; } @@ -6001,7 +6140,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv, val = I915_READ(D_COMP); val |= D_COMP_COMP_DISABLE; - I915_WRITE(D_COMP, val); + mutex_lock(&dev_priv->rps.hw_lock); + if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) + DRM_ERROR("Failed to disable D_COMP\n"); + mutex_unlock(&dev_priv->rps.hw_lock); POSTING_READ(D_COMP); ndelay(100); @@ -6043,7 +6185,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv) val = I915_READ(D_COMP); val |= D_COMP_COMP_FORCE; val &= ~D_COMP_COMP_DISABLE; - I915_WRITE(D_COMP, val); + mutex_lock(&dev_priv->rps.hw_lock); + if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) + DRM_ERROR("Failed to enable D_COMP\n"); + mutex_unlock(&dev_priv->rps.hw_lock); POSTING_READ(D_COMP); val = I915_READ(LCPLL_CTL); @@ -6280,8 +6425,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, ret = intel_pipe_set_base(crtc, x, y, fb); - intel_update_watermarks(dev); - return ret; } @@ -6659,8 +6802,12 @@ void 
intel_crtc_load_lut(struct drm_crtc *crtc) if (!crtc->enabled || !intel_crtc->active) return; - if (!HAS_PCH_SPLIT(dev_priv->dev)) - assert_pll_enabled(dev_priv, pipe); + if (!HAS_PCH_SPLIT(dev_priv->dev)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) + assert_dsi_pll_enabled(dev_priv); + else + assert_pll_enabled(dev_priv, pipe); + } /* use legacy palette for Ironlake */ if (HAS_PCH_SPLIT(dev)) @@ -6782,23 +6929,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, int pipe = intel_crtc->pipe; int x = intel_crtc->cursor_x; int y = intel_crtc->cursor_y; - u32 base, pos; + u32 base = 0, pos = 0; bool visible; - pos = 0; - - if (on && crtc->enabled && crtc->fb) { + if (on) base = intel_crtc->cursor_addr; - if (x > (int) crtc->fb->width) - base = 0; - if (y > (int) crtc->fb->height) - base = 0; - } else + if (x >= intel_crtc->config.pipe_src_w) + base = 0; + + if (y >= intel_crtc->config.pipe_src_h) base = 0; if (x < 0) { - if (x + intel_crtc->cursor_width < 0) + if (x + intel_crtc->cursor_width <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; @@ -6807,7 +6951,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, pos |= x << CURSOR_X_SHIFT; if (y < 0) { - if (y + intel_crtc->cursor_height < 0) + if (y + intel_crtc->cursor_height <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; @@ -7228,6 +7372,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, mutex_unlock(&crtc->mutex); } +static int i9xx_pll_refclk(struct drm_device *dev, + const struct intel_crtc_config *pipe_config) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpll = pipe_config->dpll_hw_state.dpll; + + if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) + return dev_priv->vbt.lvds_ssc_freq * 1000; + else if (HAS_PCH_SPLIT(dev)) + return 120000; + else if (!IS_GEN2(dev)) + return 96000; + else + return 48000; +} + /* Returns the clock of the currently programmed mode of the given pipe. 
*/ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) @@ -7235,14 +7395,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; int pipe = pipe_config->cpu_transcoder; - u32 dpll = I915_READ(DPLL(pipe)); + u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; intel_clock_t clock; + int refclk = i9xx_pll_refclk(dev, pipe_config); if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) - fp = I915_READ(FP0(pipe)); + fp = pipe_config->dpll_hw_state.fp0; else - fp = I915_READ(FP1(pipe)); + fp = pipe_config->dpll_hw_state.fp1; clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; if (IS_PINEVIEW(dev)) { @@ -7273,14 +7434,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, default: DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " "mode\n", (int)(dpll & DPLL_MODE_MASK)); - pipe_config->adjusted_mode.clock = 0; return; } if (IS_PINEVIEW(dev)) - pineview_clock(96000, &clock); + pineview_clock(refclk, &clock); else - i9xx_clock(96000, &clock); + i9xx_clock(refclk, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); @@ -7288,13 +7448,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> DPLL_FPA01_P1_POST_DIV_SHIFT); clock.p2 = 14; - - if ((dpll & PLL_REF_INPUT_MASK) == - PLLB_REF_INPUT_SPREADSPECTRUMIN) { - /* XXX: might not be 66MHz */ - i9xx_clock(66000, &clock); - } else - i9xx_clock(48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; @@ -7306,59 +7459,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, clock.p2 = 4; else clock.p2 = 2; - - i9xx_clock(48000, &clock); } + + i9xx_clock(refclk, &clock); } - pipe_config->adjusted_mode.clock = clock.dot; + /* + * This value includes pixel_multiplier. We will use + * port_clock to compute adjusted_mode.clock in the + * encoder's get_config() function. + */ + pipe_config->port_clock = clock.dot; } -static void ironlake_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_config *pipe_config) +int intel_dotclock_calculate(int link_freq, + const struct intel_link_m_n *m_n) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - int link_freq, repeat; - u64 clock; - u32 link_m, link_n; - - repeat = pipe_config->pixel_multiplier; - /* * The calculation for the data clock is: - * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp + * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp * But we want to avoid losing precision if possible, so: - * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp)) + * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) * * and the link clock is simpler: - * link_clock = (m * link_clock * repeat) / n + * link_clock = (m * link_clock) / n */ + if (!m_n->link_n) + return 0; + + return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); +}
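/*
 * [Editorial aside, not part of the patch] Example of the link clock
 * relation above: 1920x1080@60 needs a 148500 kHz dot clock. On a
 * 270000 kHz (DP 2.7) link the M/N ratio is 148500/270000 = 11/20, so
 * with link_m/link_n programmed in that ratio:
 *
 *   dotclock = link_m * link_freq / link_n = 11 * 270000 / 20
 *            = 148500 kHz
 *
 * which is what intel_dotclock_calculate() recovers from the registers.
 */
+ +static void ironlake_pch_clock_get(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + + /* read out port_clock from the DPLL */ + i9xx_crtc_clock_get(crtc, pipe_config); + /* - * We need to get the FDI or DP link clock here to derive - * the M/N dividers. - * - * For FDI, we read it from the BIOS or use a fixed 2.7GHz. - * For DP, it's either 1.62GHz or 2.7GHz.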
- * We do our calculations in 10*MHz since we don't need much precision. + * This value does not include pixel_multiplier. + * We will check that port_clock and adjusted_mode.clock + * agree once we know their relationship in the encoder's + * get_config() function. */ - if (pipe_config->has_pch_encoder) - link_freq = intel_fdi_link_freq(dev) * 10000; - else - link_freq = pipe_config->port_clock; - - link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder)); - link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder)); - - if (!link_m || !link_n) - return; - - clock = ((u64)link_m * (u64)link_freq * (u64)repeat); - do_div(clock, link_n); - - pipe_config->adjusted_mode.clock = clock; + pipe_config->adjusted_mode.clock = + intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, + &pipe_config->fdi_m_n); } /** Returns the currently programmed mode of the given pipe. */ @@ -7374,6 +7523,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, int hsync = I915_READ(HSYNC(cpu_transcoder)); int vtot = I915_READ(VTOTAL(cpu_transcoder)); int vsync = I915_READ(VSYNC(cpu_transcoder)); + enum pipe pipe = intel_crtc->pipe; mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -7386,8 +7536,11 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need * to use a real value here instead. */ - pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe; + pipe_config.cpu_transcoder = (enum transcoder) pipe; pipe_config.pixel_multiplier = 1; + pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe)); + pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe)); + pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); i9xx_crtc_clock_get(intel_crtc, &pipe_config); mode->clock = pipe_config.adjusted_mode.clock; @@ -7684,7 +7837,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_mark_page_flip_active(intel_crtc); - intel_ring_advance(ring); + __intel_ring_advance(ring); return 0; err_unpin: @@ -7726,7 +7879,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_NOOP); intel_mark_page_flip_active(intel_crtc); - intel_ring_advance(ring); + __intel_ring_advance(ring); return 0; err_unpin: @@ -7775,7 +7928,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, intel_ring_emit(ring, pf | pipesrc); intel_mark_page_flip_active(intel_crtc); - intel_ring_advance(ring); + __intel_ring_advance(ring); return 0; err_unpin: @@ -7820,7 +7973,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, intel_ring_emit(ring, pf | pipesrc); intel_mark_page_flip_active(intel_crtc); - intel_ring_advance(ring); + __intel_ring_advance(ring); return 0; err_unpin: @@ -7899,7 +8052,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, (MI_NOOP)); intel_mark_page_flip_active(intel_crtc); - intel_ring_advance(ring); + __intel_ring_advance(ring); return 0; err_unpin: @@ -8179,6 +8332,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, return bpp; } +static void intel_dump_crtc_timings(const struct drm_display_mode *mode) +{ + DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " + "type: 0x%x flags: 0x%x\n", + mode->clock, + mode->crtc_hdisplay, mode->crtc_hsync_start, + mode->crtc_hsync_end, mode->crtc_htotal, + mode->crtc_vdisplay, mode->crtc_vsync_start, + mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); +}
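/*
 * [Editorial aside, not part of the patch] For a standard CEA 1080p60
 * mode the debug line above would read:
 *
 *   crtc timings: 148500 1920 2008 2052 2200 1080 1084 1089 1125,
 *   type: 0x48 flags: 0x5
 *
 * i.e. a 148.5 MHz dot clock with +hsync/+vsync (flags 0x5) and the
 * PREFERRED | DRIVER type bits (0x48).
 */
+ static void intel_dump_pipe_config(struct intel_crtc *crtc, struct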
intel_crtc_config *pipe_config, const char *context) @@ -8195,10 +8359,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, pipe_config->fdi_m_n.tu); + DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + pipe_config->has_dp_encoder, + pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, + pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, + pipe_config->dp_m_n.tu); DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->requested_mode); DRM_DEBUG_KMS("adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->adjusted_mode); + intel_dump_crtc_timings(&pipe_config->adjusted_mode); + DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock); + DRM_DEBUG_KMS("pipe src size: %dx%d\n", + pipe_config->pipe_src_w, pipe_config->pipe_src_h); DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", pipe_config->gmch_pfit.control, pipe_config->gmch_pfit.pgm_ratios, @@ -8208,6 +8381,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->pch_pfit.size, pipe_config->pch_pfit.enabled ? "enabled" : "disabled"); DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); + DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); } static bool check_encoder_cloning(struct drm_crtc *crtc) @@ -8251,6 +8425,10 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, drm_mode_copy(&pipe_config->adjusted_mode, mode); drm_mode_copy(&pipe_config->requested_mode, mode); + + pipe_config->pipe_src_w = mode->hdisplay; + pipe_config->pipe_src_h = mode->vdisplay; + pipe_config->cpu_transcoder = (enum transcoder) to_intel_crtc(crtc)->pipe; pipe_config->shared_dpll = DPLL_ID_PRIVATE; @@ -8304,7 +8482,8 @@ encoder_retry: /* Set default port clock if not overwritten by the encoder. Needs to be * done afterwards in case the encoder adjusts the mode. */
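/*
 * [Editorial aside, not part of the patch] A hypothetical worked case of
 * the default below: an SDVO encoder that set pixel_multiplier = 4 for a
 * 25175 kHz mode leaves port_clock = 25175 * 4 = 100700 kHz, so the DPLL
 * runs in its valid range while the pipe still sees the mode's own dot
 * clock.
 */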
if (!pipe_config->port_clock) - pipe_config->port_clock = pipe_config->adjusted_mode.clock; + pipe_config->port_clock = pipe_config->adjusted_mode.clock * + pipe_config->pixel_multiplier; ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); if (ret < 0) { @@ -8491,13 +8670,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) } -static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur, - struct intel_crtc_config *new) +static bool intel_fuzzy_clock_check(int clock1, int clock2) { - int clock1, clock2, diff; - - clock1 = cur->adjusted_mode.clock; - clock2 = new->adjusted_mode.clock; + int diff; if (clock1 == clock2) return true; @@ -8551,6 +8726,15 @@ intel_pipe_config_compare(struct drm_device *dev, return false; \ } +#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ + if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ + DRM_ERROR("mismatch in " #name " " \ + "(expected %i, found %i)\n", \ + current_config->name, \ + pipe_config->name); \ + return false; \ + } + #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) @@ -8564,6 +8748,13 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(fdi_m_n.link_n); PIPE_CONF_CHECK_I(fdi_m_n.tu); + PIPE_CONF_CHECK_I(has_dp_encoder); + PIPE_CONF_CHECK_I(dp_m_n.gmch_m); + PIPE_CONF_CHECK_I(dp_m_n.gmch_n); + PIPE_CONF_CHECK_I(dp_m_n.link_m); + PIPE_CONF_CHECK_I(dp_m_n.link_n); + PIPE_CONF_CHECK_I(dp_m_n.tu); + PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay); PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal); PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start); @@ -8594,8 +8785,8 @@ intel_pipe_config_compare(struct drm_device *dev, DRM_MODE_FLAG_NVSYNC); } - PIPE_CONF_CHECK_I(requested_mode.hdisplay); - PIPE_CONF_CHECK_I(requested_mode.vdisplay); + PIPE_CONF_CHECK_I(pipe_src_w); + PIPE_CONF_CHECK_I(pipe_src_h); PIPE_CONF_CHECK_I(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ @@ -8610,26 +8801,28 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(ips_enabled); + PIPE_CONF_CHECK_I(double_wide); + PIPE_CONF_CHECK_I(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); + + if (!IS_HASWELL(dev)) { + PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + } + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_FLAGS +#undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_QUIRK - if (!IS_HASWELL(dev)) { - if (!intel_fuzzy_clock_check(current_config, pipe_config)) { - DRM_ERROR("mismatch in clock (expected %d, found %d)\n", - current_config->adjusted_mode.clock, - pipe_config->adjusted_mode.clock); - return false; - } - } - return true; } @@ -8761,9 +8954,6 @@ check_crtc_state(struct drm_device *dev) encoder->get_config(encoder, &pipe_config); } - if (dev_priv->display.get_clock) - dev_priv->display.get_clock(crtc, &pipe_config); - WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -8838,6 +9028,18 @@ intel_modeset_check_state(struct drm_device *dev) check_shared_dpll_state(dev); } +void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, + int dotclock) +{ + /* + * FDI already provided one idea for the dotclock. + * Yell if the encoder disagrees.
+ */ + WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.clock, dotclock), + "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", + pipe_config->adjusted_mode.clock, dotclock); +} + static int __intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *fb) @@ -9555,6 +9757,8 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); } + + intel_dsi_init(dev); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { bool found = false; @@ -9787,7 +9991,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = ironlake_update_plane; } else if (HAS_PCH_SPLIT(dev)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; - dev_priv->display.get_clock = ironlake_crtc_clock_get; dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; @@ -9795,7 +9998,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = ironlake_update_plane; } else if (IS_VALLEYVIEW(dev)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; - dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9803,7 +10005,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.update_plane = i9xx_update_plane; } else { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; - dev_priv->display.get_clock = i9xx_crtc_clock_get; dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; @@ -9989,20 +10190,11 @@ static struct intel_quirk intel_quirks[] = { /* Sony Vaio Y cannot use SSC on LVDS */ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, - /* Acer Aspire 5734Z must invert backlight brightness */ - { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, - - /* Acer/eMachines G725 */ - { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, - - /* Acer/eMachines e725 */ - { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, - - /* Acer/Packard Bell NCL20 */ - { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, - - /* Acer Aspire 4736Z */ - { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, + /* + * All GM45 Acer (and its brands eMachines and Packard Bell) laptops + * seem to use inverted backlight PWM. 
+ */ + { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness }, /* Dell XPS13 HD Sandy Bridge */ { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable }, @@ -10078,8 +10270,6 @@ void i915_disable_vga_mem(struct drm_device *dev) void intel_modeset_init_hw(struct drm_device *dev) { - intel_init_power_well(dev); - intel_prepare_ddi(dev); intel_init_clock_gating(dev); @@ -10422,15 +10612,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe); } - list_for_each_entry(crtc, &dev->mode_config.crtc_list, - base.head) { - if (!crtc->active) - continue; - if (dev_priv->display.get_clock) - dev_priv->display.get_clock(crtc, - &crtc->config); - } - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { if (connector->get_hw_state(connector)) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 79c14e298ba6..60d2006fe15d 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -38,6 +38,32 @@ #define DP_LINK_CHECK_TIMEOUT (10 * 1000) +struct dp_link_dpll { + int link_bw; + struct dpll dpll; +}; + +static const struct dp_link_dpll gen4_dpll[] = { + { DP_LINK_BW_1_62, + { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, + { DP_LINK_BW_2_7, + { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } +}; + +static const struct dp_link_dpll pch_dpll[] = { + { DP_LINK_BW_1_62, + { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, + { DP_LINK_BW_2_7, + { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } +}; + +static const struct dp_link_dpll vlv_dpll[] = { + { DP_LINK_BW_1_62, + { .p1 = 3, .p2 = 2, .n = 5, .m1 = 5, .m2 = 3 } }, + { DP_LINK_BW_2_7, + { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } +}; + /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) * @intel_dp: DP struct @@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev) } } +static void +intel_dp_init_panel_power_sequencer(struct drm_device *dev, + struct intel_dp *intel_dp, + struct edp_power_seq *out); +static void +intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, + struct intel_dp *intel_dp, + struct edp_power_seq *out); + +static enum pipe +vlv_power_sequencer_pipe(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_crtc *crtc = intel_dig_port->base.base.crtc; + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + enum pipe pipe; + + /* modeset should have pipe */ + if (crtc) + return to_intel_crtc(crtc)->pipe; + + /* init time, try to find a pipe with this port selected */ + for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) { + u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) & + PANEL_PORT_SELECT_MASK; + if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B) + return pipe; + if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C) + return pipe; + } + + /* shrug */ + return PIPE_A; +} + +static u32 _pp_ctrl_reg(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (HAS_PCH_SPLIT(dev)) + return PCH_PP_CONTROL; + else + return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp)); +} + +static u32 _pp_stat_reg(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + + if (HAS_PCH_SPLIT(dev)) + return PCH_PP_STATUS; + else + return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); +} + static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 
{ struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_stat_reg; - pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; - return (I915_READ(pp_stat_reg) & PP_ON) != 0; + return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; } static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_ctrl_reg; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; - return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0; + return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0; } static void @@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_stat_reg, pp_ctrl_reg; if (!is_edp(intel_dp)) return; - pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; - if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { WARN(1, "eDP powered off while attempting aux channel communication.\n"); DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", - I915_READ(pp_stat_reg), - I915_READ(pp_ctrl_reg)); + I915_READ(_pp_stat_reg(intel_dp)), + I915_READ(_pp_ctrl_reg(intel_dp))); } } @@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } + /* Only 5 data registers! */ + if (WARN_ON(send_bytes > 20 || recv_size > 20)) { + ret = -E2BIG; + goto out; + } + while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) { /* Must try at least 3 times according to DP spec */ for (try = 0; try < 5; try++) { @@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, int msg_bytes; uint8_t ack; + if (WARN_ON(send_bytes > 16)) + return -E2BIG; + intel_dp_check_edp(intel_dp); - if (send_bytes > 16) - return -1; msg[0] = AUX_NATIVE_WRITE << 4; msg[1] = address >> 8; msg[2] = address & 0xff; @@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, uint8_t ack; int ret; + if (WARN_ON(recv_bytes > 19)) + return -E2BIG; + intel_dp_check_edp(intel_dp); msg[0] = AUX_NATIVE_READ << 4; msg[1] = address >> 8; @@ -660,41 +745,30 @@ intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config, int link_bw) { struct drm_device *dev = encoder->base.dev; + const struct dp_link_dpll *divisor = NULL; + int i, count = 0; if (IS_G4X(dev)) { - if (link_bw == DP_LINK_BW_1_62) { - pipe_config->dpll.p1 = 2; - pipe_config->dpll.p2 = 10; - pipe_config->dpll.n = 2; - pipe_config->dpll.m1 = 23; - pipe_config->dpll.m2 = 8; - } else { - pipe_config->dpll.p1 = 1; - pipe_config->dpll.p2 = 10; - pipe_config->dpll.n = 1; - pipe_config->dpll.m1 = 14; - pipe_config->dpll.m2 = 2; - } - pipe_config->clock_set = true; + divisor = gen4_dpll; + count = ARRAY_SIZE(gen4_dpll); } else if (IS_HASWELL(dev)) { /* Haswell has special-purpose DP DDI clocks. 
*/ } else if (HAS_PCH_SPLIT(dev)) { - if (link_bw == DP_LINK_BW_1_62) { - pipe_config->dpll.n = 1; - pipe_config->dpll.p1 = 2; - pipe_config->dpll.p2 = 10; - pipe_config->dpll.m1 = 12; - pipe_config->dpll.m2 = 9; - } else { - pipe_config->dpll.n = 2; - pipe_config->dpll.p1 = 1; - pipe_config->dpll.p2 = 10; - pipe_config->dpll.m1 = 14; - pipe_config->dpll.m2 = 8; - } - pipe_config->clock_set = true; + divisor = pch_dpll; + count = ARRAY_SIZE(pch_dpll); } else if (IS_VALLEYVIEW(dev)) { - /* FIXME: Need to figure out optimized DP clocks for vlv. */ + divisor = vlv_dpll; + count = ARRAY_SIZE(vlv_dpll); + } + + if (divisor && count) { + for (i = 0; i < count; i++) { + if (link_bw == divisor[i].link_bw) { + pipe_config->dpll = divisor[i].dpll; + pipe_config->clock_set = true; + break; + } + } } } @@ -944,8 +1018,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; u32 pp_stat_reg, pp_ctrl_reg; - pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_stat_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", mask, value, @@ -987,11 +1061,8 @@ static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; u32 control; - u32 pp_ctrl_reg; - - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; - control = I915_READ(pp_ctrl_reg); + control = I915_READ(_pp_ctrl_reg(intel_dp)); control &= ~PANEL_UNLOCK_MASK; control |= PANEL_UNLOCK_REGS; return control; @@ -1024,8 +1095,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); pp |= EDP_FORCE_VDD; - pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_stat_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1053,8 +1124,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; - pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_stat_reg = _pp_stat_reg(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1119,20 +1190,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) ironlake_wait_panel_power_cycle(intel_dp); + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); pp = ironlake_get_pp_control(intel_dp); if (IS_GEN5(dev)) { /* ILK workaround: disable reset around power sequence */ pp &= ~PANEL_POWER_RESET; - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); } pp |= POWER_TARGET_ON; if (!IS_GEN5(dev)) pp |= PANEL_POWER_RESET; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ?
PIPEA_PP_CONTROL : PCH_PP_CONTROL; - I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1140,8 +1210,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp) if (IS_GEN5(dev)) { pp |= PANEL_POWER_RESET; /* restore panel reset bit */ - I915_WRITE(PCH_PP_CONTROL, pp); - POSTING_READ(PCH_PP_CONTROL); + I915_WRITE(pp_ctrl_reg, pp); + POSTING_READ(pp_ctrl_reg); } } @@ -1164,7 +1234,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) * panels get very unhappy and cease to work. */ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1197,7 +1267,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); pp |= EDP_BLC_ENABLE; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1221,7 +1291,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp) pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_BLC_ENABLE; - pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL; + pp_ctrl_reg = _pp_ctrl_reg(intel_dp); I915_WRITE(pp_ctrl_reg, pp); POSTING_READ(pp_ctrl_reg); @@ -1368,6 +1438,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); + int dotclock; if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { tmp = I915_READ(intel_dp->output_reg); @@ -1395,12 +1466,24 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->adjusted_mode.flags |= flags; - if (dp_to_dig_port(intel_dp)->port == PORT_A) { + pipe_config->has_dp_encoder = true; + + intel_dp_get_m_n(crtc, pipe_config); + + if (port == PORT_A) { if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ) pipe_config->port_clock = 162000; else pipe_config->port_clock = 270000; } + + dotclock = intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); + + if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A) + ironlake_check_encoder_dotclock(pipe_config, dotclock); + + pipe_config->adjusted_mode.clock = dotclock; } static bool is_edp_psr(struct intel_dp *intel_dp) @@ -1566,7 +1649,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) } intel_crtc = to_intel_crtc(crtc); - if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) { + if (!intel_crtc_active(crtc)) { DRM_DEBUG_KMS("crtc not active for PSR\n"); dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE; return false; @@ -1593,7 +1676,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) return false; } - if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) { + if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED; return false; @@ -1713,14 +1796,24 @@ static void intel_enable_dp(struct intel_encoder *encoder) ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); intel_dp_stop_link_train(intel_dp); +} + +static void g4x_enable_dp(struct intel_encoder *encoder) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + intel_enable_dp(encoder); ironlake_edp_backlight_on(intel_dp); } static void 
vlv_enable_dp(struct intel_encoder *encoder) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + ironlake_edp_backlight_on(intel_dp); } -static void intel_pre_enable_dp(struct intel_encoder *encoder) +static void g4x_pre_enable_dp(struct intel_encoder *encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *dport = dp_to_dig_port(intel_dp); @@ -1738,53 +1831,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder) struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); int port = vlv_dport_to_channel(dport); int pipe = intel_crtc->pipe; + struct edp_power_seq power_seq; u32 val; mutex_lock(&dev_priv->dpio_lock); - val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port)); + val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port)); val = 0; if (pipe) val |= (1<<21); else val &= ~(1<<21); val |= 0x001000c4; - vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018); - vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888); + vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val); + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018); + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888); mutex_unlock(&dev_priv->dpio_lock); + /* init power sequencer on this pipe and port */ + intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); + intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, + &power_seq); + intel_enable_dp(encoder); vlv_wait_port_ready(dev_priv, port); } -static void intel_dp_pre_pll_enable(struct intel_encoder *encoder) +static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) { struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = + to_intel_crtc(encoder->base.crtc); int port = vlv_dport_to_channel(dport); - - if (!IS_VALLEYVIEW(dev)) - return; + int pipe = intel_crtc->pipe; /* Program Tx lane resets to default */ mutex_lock(&dev_priv->dpio_lock); - vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); - vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | DPIO_PCS_CLK_SOFT_RESET); mutex_unlock(&dev_priv->dpio_lock); } @@ -1919,10 +2018,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) struct drm_device *dev = intel_dp_to_dev(intel_dp); struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *dport = dp_to_dig_port(intel_dp); + struct intel_crtc *intel_crtc = + to_intel_crtc(dport->base.base.crtc); unsigned long demph_reg_value, preemph_reg_value, uniqtranscale_reg_value; uint8_t train_set = intel_dp->train_set[0]; int port = vlv_dport_to_channel(dport); + int pipe = intel_crtc->pipe; switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPHASIS_0: @@ -1998,14 +2100,14 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp) } mutex_lock(&dev_priv->dpio_lock); - vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000); - vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value); - vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port), + vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000); + vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value); +
vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port), uniqtranscale_reg_value); - vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040); - vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000); - vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); - vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000); + vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040); + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000); + vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value); + vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000); mutex_unlock(&dev_priv->dpio_lock); return 0; @@ -3144,24 +3246,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct edp_power_seq cur, vbt, spec, final; u32 pp_on, pp_off, pp_div, pp; - int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg; + int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; if (HAS_PCH_SPLIT(dev)) { - pp_control_reg = PCH_PP_CONTROL; + pp_ctrl_reg = PCH_PP_CONTROL; pp_on_reg = PCH_PP_ON_DELAYS; pp_off_reg = PCH_PP_OFF_DELAYS; pp_div_reg = PCH_PP_DIVISOR; } else { - pp_control_reg = PIPEA_PP_CONTROL; - pp_on_reg = PIPEA_PP_ON_DELAYS; - pp_off_reg = PIPEA_PP_OFF_DELAYS; - pp_div_reg = PIPEA_PP_DIVISOR; + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + + pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); + pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); + pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); + pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); } /* Workaround: Need to write PP_CONTROL with the unlock key as * the very first thing. */ pp = ironlake_get_pp_control(intel_dp); - I915_WRITE(pp_control_reg, pp); + I915_WRITE(pp_ctrl_reg, pp); pp_on = I915_READ(pp_on_reg); pp_off = I915_READ(pp_off_reg); @@ -3249,9 +3353,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, pp_off_reg = PCH_PP_OFF_DELAYS; pp_div_reg = PCH_PP_DIVISOR; } else { - pp_on_reg = PIPEA_PP_ON_DELAYS; - pp_off_reg = PIPEA_PP_OFF_DELAYS; - pp_div_reg = PIPEA_PP_DIVISOR; + enum pipe pipe = vlv_power_sequencer_pipe(intel_dp); + + pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); + pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe); + pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe); } /* And finally store the new values in the power sequencer. */ @@ -3268,12 +3374,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, /* Haswell doesn't have any port selection bits for the panel * power sequencer any more. 
*/ if (IS_VALLEYVIEW(dev)) { - port_sel = I915_READ(pp_on_reg) & 0xc0000000; + if (dp_to_dig_port(intel_dp)->port == PORT_B) + port_sel = PANEL_PORT_SELECT_DPB_VLV; + else + port_sel = PANEL_PORT_SELECT_DPC_VLV; } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { if (dp_to_dig_port(intel_dp)->port == PORT_A) - port_sel = PANEL_POWER_PORT_DP_A; + port_sel = PANEL_PORT_SELECT_DPA; else - port_sel = PANEL_POWER_PORT_DP_D; + port_sel = PANEL_PORT_SELECT_DPD; } pp_on |= port_sel; @@ -3539,12 +3648,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) intel_encoder->get_hw_state = intel_dp_get_hw_state; intel_encoder->get_config = intel_dp_get_config; if (IS_VALLEYVIEW(dev)) { - intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable; + intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable; intel_encoder->pre_enable = vlv_pre_enable_dp; intel_encoder->enable = vlv_enable_dp; } else { - intel_encoder->pre_enable = intel_pre_enable_dp; - intel_encoder->enable = intel_enable_dp; + intel_encoder->pre_enable = g4x_pre_enable_dp; + intel_encoder->enable = g4x_enable_dp; } intel_dig_port->port = port; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 28cae80495e2..4b75328462ed 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -93,13 +93,17 @@ #define INTEL_OUTPUT_HDMI 6 #define INTEL_OUTPUT_DISPLAYPORT 7 #define INTEL_OUTPUT_EDP 8 -#define INTEL_OUTPUT_UNKNOWN 9 +#define INTEL_OUTPUT_DSI 9 +#define INTEL_OUTPUT_UNKNOWN 10 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 #define INTEL_DVO_CHIP_TMDS 2 #define INTEL_DVO_CHIP_TVOUT 4 +#define INTEL_DSI_COMMAND_MODE 0 +#define INTEL_DSI_VIDEO_MODE 1 + struct intel_framebuffer { struct drm_framebuffer base; struct drm_i915_gem_object *obj; @@ -207,8 +211,21 @@ struct intel_crtc_config { #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ unsigned long quirks; + /* User requested mode, only valid as a starting point to + * compute adjusted_mode, except in the case of (S)DVO where + * it's also for the output timings of the (S)DVO chip. + * adjusted_mode will then correspond to the (S)DVO chip's + * preferred input timings. */ struct drm_display_mode requested_mode; + /* Actual pipe timings ie. what we program into the pipe timing + * registers. adjusted_mode.clock is the pipe pixel clock. */ struct drm_display_mode adjusted_mode; + + /* Pipe source size (ie. panel fitter input size) + * All planes will be positioned inside this space, + * and get clipped at the edges. */ + int pipe_src_w, pipe_src_h; + /* Whether to set up the PCH/FDI. Note that we never allow sharing * between pch encoders and cpu encoders. */ bool has_pch_encoder; @@ -262,7 +279,8 @@ struct intel_crtc_config { /* * Frequency the dpll for the port should run at. Differs from the - * adjusted dotclock e.g. for DP or 12bpc hdmi mode. + * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also + * already multiplied by pixel_multiplier.
*/ int port_clock; @@ -288,6 +306,8 @@ struct intel_crtc_config { struct intel_link_m_n fdi_m_n; bool ips_enabled; + + bool double_wide; }; struct intel_crtc { @@ -522,6 +542,7 @@ extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj, struct intel_ring_buffer *ring); extern void intel_mark_idle(struct drm_device *dev); extern void intel_lvds_init(struct drm_device *dev); +extern bool intel_dsi_init(struct drm_device *dev); extern bool intel_is_dual_link_lvds(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); @@ -708,9 +729,10 @@ extern void intel_write_eld(struct drm_encoder *encoder, extern void intel_prepare_ddi(struct drm_device *dev); extern void hsw_fdi_link_train(struct drm_crtc *crtc); extern void intel_ddi_init(struct drm_device *dev, enum port port); +extern enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); /* For use by IVB LP watermark workaround in intel_sprite.c */ -extern void intel_update_watermarks(struct drm_device *dev); +extern void intel_update_watermarks(struct drm_crtc *crtc); extern void intel_update_sprite_watermarks(struct drm_plane *plane, struct drm_crtc *crtc, uint32_t sprite_width, int pixel_size, @@ -741,8 +763,13 @@ extern void i915_remove_power_well(struct drm_device *dev); extern bool intel_display_power_enabled(struct drm_device *dev, enum intel_display_power_domain domain); +extern void intel_display_power_get(struct drm_device *dev, + enum intel_display_power_domain domain); +extern void intel_display_power_put(struct drm_device *dev, + enum intel_display_power_domain domain); extern void intel_init_power_well(struct drm_device *dev); extern void intel_set_power_well(struct drm_device *dev, bool enable); +extern void intel_resume_power_well(struct drm_device *dev); extern void intel_enable_gt_powersave(struct drm_device *dev); extern void intel_disable_gt_powersave(struct drm_device *dev); extern void ironlake_teardown_rc6(struct drm_device *dev); @@ -793,6 +820,14 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev); extern void hsw_pc8_restore_interrupts(struct drm_device *dev); extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); +extern void intel_dp_get_m_n(struct intel_crtc *crtc, + struct intel_crtc_config *pipe_config); +extern int intel_dotclock_calculate(int link_freq, + const struct intel_link_m_n *m_n); +extern void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config, + int dotclock); + +extern bool intel_crtc_active(struct drm_crtc *crtc); extern void i915_disable_vga_mem(struct drm_device *dev); #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c new file mode 100644 index 000000000000..674fd4989b45 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -0,0 +1,621 @@ +/* + * Copyright © 2013 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * 
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+			   u32 mask)
+{
+	u32 tmp = vlv_cck_read(dev_priv, reg);
+	tmp &= ~mask;
+	tmp |= val;
+	vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->dpio_lock);
+
+	/* Enable bandgap fix in GOP driver */
+	vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+	msleep(20);
+
+	/* Turn Display Trunk on */
+	vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+	msleep(20);
+
+	vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+	msleep(20);
+	vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+	mutex_unlock(&dev_priv->dpio_lock);
+
+	/* Need huge delay, otherwise clock is not stable */
+	msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+	return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+				     struct intel_crtc_config *config)
+{
+	struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+						   base);
+	struct intel_connector *intel_connector = intel_dsi->attached_connector;
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+	struct drm_display_mode *mode = &config->requested_mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (fixed_mode)
+		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	if (intel_dsi->dev.dev_ops->mode_fixup)
+		return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+							  mode, adjusted_mode);
+
+	return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if ((temp & DEVICE_READY) == 0) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+	} else if (temp & ULPS_STATE_MASK) {
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+		/*
+		 * We need to ensure that there is a minimum of 1 ms time
+		 * available before clearing the ULPS exit state.
+		 */
+		msleep(2);
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+
+	if (is_cmd_mode(intel_dsi))
+		I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+	if (is_vid_mode(intel_dsi)) {
+		msleep(20); /* XXX */
+		dpi_send_cmd(intel_dsi, TURN_ON);
+		msleep(100);
+
+		/* assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+	}
+
+	intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+	int pipe = intel_crtc->pipe;
+	u32 temp;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+	if (is_vid_mode(intel_dsi)) {
+		dpi_send_cmd(intel_dsi, SHUTDOWN);
+		msleep(10);
+
+		/* de-assert ip_tg_enable signal */
+		temp = I915_READ(MIPI_PORT_CTRL(pipe));
+		I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+		POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+		msleep(2);
+	}
+
+	temp = I915_READ(MIPI_DEVICE_READY(pipe));
+	if (temp & DEVICE_READY) {
+		temp &= ~DEVICE_READY;
+		temp &= ~ULPS_STATE_MASK;
+		I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+	}
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+	DRM_DEBUG_KMS("\n");
+
+	vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+				   enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	u32 port, func;
+	enum pipe p;
+
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: this only works for one DSI output */
+	for (p = PIPE_A; p <= PIPE_B; p++) {
+		port = I915_READ(MIPI_PORT_CTRL(p));
+		func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+		if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+			if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+				*pipe = p;
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+				 struct intel_crtc_config *pipe_config)
+{
+	DRM_DEBUG_KMS("\n");
+
+	/* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+		DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+		return MODE_NO_DBLESCAN;
+	}
+
+	if (fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
+			return MODE_PANEL;
+		if (mode->vdisplay > fixed_mode->vdisplay)
+			return MODE_PANEL;
+	}
+
+	return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
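+/*
+ * Worked example for the two helpers below (illustrative numbers only, not
+ * from the original patch): with the 20 MHz escape clock (divider 1),
+ * txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) is 2000 cycles, and a 1920 pixel
+ * wide line at 24 bpp on 4 lanes is
+ * txbyteclkhs(1920, 24, 4) = DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24, 8), 4)
+ * = 1440 high speed byte clocks.
+ */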
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+	switch (divider) {
+	case ESCAPE_CLOCK_DIVIDER_1:
+	default:
+		return 20 * us;
+	case ESCAPE_CLOCK_DIVIDER_2:
+		return 10 * us;
+	case ESCAPE_CLOCK_DIVIDER_4:
+		return 5 * us;
+	}
+}
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+	return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+			    const struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	unsigned int lane_count = intel_dsi->lane_count;
+
+	u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+	hactive = mode->hdisplay;
+	hfp = mode->hsync_start - mode->hdisplay;
+	hsync = mode->hsync_end - mode->hsync_start;
+	hbp = mode->htotal - mode->hsync_end;
+
+	vfp = mode->vsync_start - mode->vdisplay;
+	vsync = mode->vsync_end - mode->vsync_start;
+	vbp = mode->vtotal - mode->vsync_end;
+
+	/* horizontal values are in terms of high speed byte clock */
+	hactive = txbyteclkhs(hactive, bpp, lane_count);
+	hfp = txbyteclkhs(hfp, bpp, lane_count);
+	hsync = txbyteclkhs(hsync, bpp, lane_count);
+	hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+	I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+	I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+	/* meaningful for video mode non-burst sync pulse mode only, can be zero
+	 * for non-burst sync events and burst modes */
+	I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+	I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+	/* vertical values are in terms of lines */
+	I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+	I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+	I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
+	int pipe = intel_crtc->pipe;
+	unsigned int bpp = intel_crtc->config.pipe_bpp;
+	u32 val, tmp;
+
+	DRM_DEBUG_KMS("pipe %d\n", pipe);
+
+	/* Update the DSI PLL */
+	vlv_enable_dsi_pll(intel_encoder);
+
+	/* XXX: Location of the call */
+	band_gap_wa(dev_priv);
+
+	/* escape clock divider, 20MHz, shared for A and C. device ready must be
+	 * off when doing this! txclkesc? */
+	tmp = I915_READ(MIPI_CTRL(0));
+	tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+	I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+	/* read request priority is per pipe */
+	tmp = I915_READ(MIPI_CTRL(pipe));
+	tmp &= ~READ_REQUEST_PRIORITY_MASK;
+	I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+	/* XXX: why here, why like this? handling in irq handler?!
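+	 * (presumably: write all ones to MIPI_INTR_STAT to clear any stale
+	 * status bits, then unmask everything in MIPI_INTR_EN)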
+	 */
+	I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+	I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+	I915_WRITE(MIPI_DPHY_PARAM(pipe),
+		   0x3c << EXIT_ZERO_COUNT_SHIFT |
+		   0x1f << TRAIL_COUNT_SHIFT |
+		   0xc5 << CLK_ZERO_COUNT_SHIFT |
+		   0x1f << PREPARE_COUNT_SHIFT);
+
+	I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+		   adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+		   adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+	set_dsi_timings(encoder, adjusted_mode);
+
+	val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+	if (is_cmd_mode(intel_dsi)) {
+		val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+		val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+	} else {
+		val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+		/* XXX: cross-check bpp vs. pixel format? */
+		val |= intel_dsi->pixel_format;
+	}
+	I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+	/* timeouts for recovery. one frame IIUC. if counter expires, EOT and
+	 * stop state. */
+
+	/*
+	 * In burst mode, a value greater than one DPI line time in byte
+	 * clock (txbyteclkhs) is recommended; to time out, use 1+ of the
+	 * above said value.
+	 *
+	 * In non-burst mode, a value greater than one DPI frame time in byte
+	 * clock (txbyteclkhs) is recommended; to time out, use 1+ of the
+	 * above said value.
+	 *
+	 * In DBI only mode, a value greater than one DBI frame time in byte
+	 * clock (txbyteclkhs) is recommended; to time out, use 1+ of the
+	 * above said value.
+	 */
+
+	if (is_vid_mode(intel_dsi) &&
+	    intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->htotal, bpp,
+				       intel_dsi->lane_count) + 1);
+	} else {
+		I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+			   txbyteclkhs(adjusted_mode->vtotal *
+				       adjusted_mode->htotal,
+				       bpp, intel_dsi->lane_count) + 1);
+	}
+	I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+	I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+	I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+
+	/* dphy stuff */
+
+	/* in terms of low power clock */
+	I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+	/* recovery disables */
+	I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+	/* in terms of txbyteclkhs. actual high to low switch +
+	 * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+	 *
+	 * XXX: write MIPI_STOP_STATE_STALL?
+	 */
+	I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+	/* XXX: low power clock equivalence in terms of byte clock. the number
+	 * of byte clocks occupied in one low power clock. based on txbyteclkhs
+	 * and txclkesc. txclkesc time / txbyteclk time * (105 +
+	 * MIPI_STOP_STATE_STALL) / 105.???
+	 */
+	I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+	/* the bandwidth essential for transmitting 16 long packets containing
+	 * 252 bytes meant for the dcs write memory command is programmed in
+	 * this register in terms of byte clocks. Based on the dsi transfer
+	 * rate and the number of lanes configured, the time taken to transmit
+	 * 16 long packets in a dsi stream varies.
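+	 * (the 0x820 programmed below looks like a precomputed value for the
+	 * expected configuration; the derivation is not given here)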
+	 */
+	I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+	I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+		   0xa << LP_HS_SSW_CNT_SHIFT |
+		   0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+	if (is_vid_mode(intel_dsi))
+		I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+			   intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+	struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+	DRM_DEBUG_KMS("\n");
+	return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *mode;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (!intel_connector->panel.fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		return 0;
+	}
+
+	mode = drm_mode_duplicate(connector->dev,
+				  intel_connector->panel.fixed_mode);
+	if (!mode) {
+		DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+		return 0;
+	}
+
+	drm_mode_probed_add(connector, mode);
+	return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
+	DRM_DEBUG_KMS("\n");
+	intel_panel_fini(&intel_connector->panel);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+	.destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+	.get_modes = intel_dsi_get_modes,
+	.mode_valid = intel_dsi_mode_valid,
+	.best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+	.dpms = intel_connector_dpms,
+	.detect = intel_dsi_detect,
+	.destroy = intel_dsi_destroy,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+	struct intel_dsi *intel_dsi;
+	struct intel_encoder *intel_encoder;
+	struct drm_encoder *encoder;
+	struct intel_connector *intel_connector;
+	struct drm_connector *connector;
+	struct drm_display_mode *fixed_mode = NULL;
+	const struct intel_dsi_device *dsi;
+	unsigned int i;
+
+	DRM_DEBUG_KMS("\n");
+
+	intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+	if (!intel_dsi)
+		return false;
+
+	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	if (!intel_connector) {
+		kfree(intel_dsi);
+		return false;
+	}
+
+	intel_encoder = &intel_dsi->base;
+	encoder = &intel_encoder->base;
+	intel_dsi->attached_connector = intel_connector;
+
+	connector = &intel_connector->base;
+
+	drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+	/* XXX: very likely not all of these are needed */
+	intel_encoder->hot_plug = intel_dsi_hot_plug;
+	intel_encoder->compute_config = intel_dsi_compute_config;
+	intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+	intel_encoder->pre_enable = intel_dsi_pre_enable;
+	intel_encoder->enable = intel_dsi_enable;
+	intel_encoder->mode_set = intel_dsi_mode_set;
+	intel_encoder->disable = intel_dsi_disable;
+	intel_encoder->post_disable = intel_dsi_post_disable;
+	intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+	intel_encoder->get_config = intel_dsi_get_config;
+
+	intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+	for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+		dsi = &intel_dsi_devices[i];
+		intel_dsi->dev = *dsi;
+
+		if (dsi->dev_ops->init(&intel_dsi->dev))
+			break;
+	}
+
+	if (i == ARRAY_SIZE(intel_dsi_devices)) {
+		DRM_DEBUG_KMS("no device found\n");
+		goto err;
+	}
+
+	intel_encoder->type = INTEL_OUTPUT_DSI;
+	intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+	intel_encoder->cloneable = false;
+	drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+			   DRM_MODE_CONNECTOR_DSI);
+
+	drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+	connector->display_info.subpixel_order = SubPixelHorizontalRGB; /* XXX */
+	connector->interlace_allowed = false;
+	connector->doublescan_allowed = false;
+
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+	drm_sysfs_connector_add(connector);
+
+	fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+	if (!fixed_mode) {
+		DRM_DEBUG_KMS("no fixed mode\n");
+		goto err;
+	}
+
+	fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+	intel_panel_init(&intel_connector->panel, fixed_mode);
+
+	return true;
+
+err:
+	drm_encoder_cleanup(&intel_encoder->base);
+	kfree(intel_dsi);
+	kfree(intel_connector);
+
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644
index 000000000000..c7765f33d524
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+	unsigned int panel_id;
+	const char *name;
+	int type;
+	const struct intel_dsi_dev_ops *dev_ops;
+	void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+	bool (*init)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*enable)(struct intel_dsi_device *dsi);
+
+	/* This callback must be able to assume DSI commands can be sent */
+	void (*disable)(struct intel_dsi_device *dsi);
+
+	int (*mode_valid)(struct intel_dsi_device *dsi,
+			  struct drm_display_mode *mode);
+
+	bool (*mode_fixup)(struct intel_dsi_device *dsi,
+			   const struct drm_display_mode *mode,
+			   struct drm_display_mode *adjusted_mode);
+
+	void (*mode_set)(struct intel_dsi_device *dsi,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode);
+
+	enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+	bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+	struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+	void (*destroy)(struct intel_dsi_device *dsi);
+};
+
+struct intel_dsi {
+	struct intel_encoder base;
+
+	struct intel_dsi_device dev;
+
+	struct intel_connector *attached_connector;
+
+	/* if true, use HS mode, otherwise LP */
+	bool hs;
+
+	/* virtual channel */
+	int channel;
+
+	/* number of DSI lanes */
+	unsigned int lane_count;
+
+	/* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+	u32 pixel_format;
+
+	/* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+	u32 video_mode_format;
+
+	/* eot for MIPI_EOT_DISABLE register */
+	u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+	return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644
index 000000000000..7c40f981d2c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
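
The intel_dsi_devices[] table in intel_dsi.c is intentionally empty in this patch: only the infrastructure is added here, and panel-specific sub-drivers are expected to plug in later. A minimal sketch of what such a sub-driver entry could look like, built only on the intel_dsi_dev_ops and intel_dsi_device definitions above; every name and value below is an illustrative assumption, not part of the patch:

/* hypothetical DSI panel sub-driver (illustrative only) */
static bool example_panel_init(struct intel_dsi_device *dsi)
{
	/* init() receives a pointer to the copy embedded in struct
	 * intel_dsi, so container_of() reaches the encoder state */
	struct intel_dsi *intel_dsi =
		container_of(dsi, struct intel_dsi, dev);

	intel_dsi->hs = true;		/* use high speed mode */
	intel_dsi->channel = 0;		/* virtual channel 0 */
	intel_dsi->lane_count = 4;

	return true;			/* claim this panel */
}

static const struct intel_dsi_dev_ops example_panel_ops = {
	.init = example_panel_init,
	/* .enable, .disable, .mode_valid, .get_modes, etc. as needed */
};

/* the entry that would be listed in intel_dsi_devices[]: */
static const struct intel_dsi_device example_panel = {
	.panel_id = 0,			/* assumed id */
	.name = "example panel",
	.type = INTEL_DSI_VIDEO_MODE,
	.dev_ops = &example_panel_ops,
};

intel_dsi_init() copies each candidate entry into intel_dsi->dev in turn and keeps the first one whose init() returns true; an init() returning false makes the loop try the next entry.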