Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Radeon, imx, msm and i915 fixes.

  The msm, imx and i915 ones are fairly run of the mill.

  Radeon had some DP audio fixes and posting reads for irq fixes, along
  with a fix for 32-bit kernels with new cards: we were using unsigned
  long to represent GPU-side memory space, but since that changes size
  on 32-bit vs 64-bit, cards with lots of VRAM failed. The change has
  no effect on x86-64, it just moves to using uint64_t instead."

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (35 commits)
  drm/msm: kexec fixes
  drm/msm/mdp5: fix cursor blending
  drm/msm/mdp5: fix cursor ROI
  drm/msm/atomic: Don't leak atomic commit object when commit fails
  drm/msm/mdp5: Avoid flushing registers when CRTC is disabled
  drm/msm: update generated headers (add 6th lm.base entry)
  drm/msm/mdp5: fixup "drm/msm: fix fallout of atomic dpms changes"
  drm/ttm: device address space != CPU address space
  drm/mm: Support 4 GiB and larger ranges
  drm/i915: gen4: work around hang during hibernation
  drm/i915: Check for driver readyness before handling an underrun interrupt
  drm/radeon: fix interlaced modes on DCE8
  drm/radeon: fix DRM_IOCTL_RADEON_CS oops
  drm/radeon: do a posting read in cik_set_irq
  drm/radeon: do a posting read in si_set_irq
  drm/radeon: do a posting read in evergreen_set_irq
  drm/radeon: do a posting read in r600_set_irq
  drm/radeon: do a posting read in rs600_set_irq
  drm/radeon: do a posting read in r100_set_irq
  radeon/audio: fix DP audio on DCE6
  ...
commit af13e86713
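Context for the drm/mm hunks below: on a 32-bit kernel "unsigned long" is only
32 bits, so any GPU-side offset at or above 4 GiB gets truncated, which is why
cards with lots of VRAM failed. The patch switches the node/hole arithmetic to
u64 and replaces the plain "%" with the kernel's do_div() helper, because a
native 64-bit modulo is not available on all 32-bit architectures. A minimal
sketch of the resulting alignment step (align_gpu_offset is a hypothetical
helper used only for illustration, not a function from the patch):

	#include <linux/kernel.h>
	#include <asm/div64.h>

	static u64 align_gpu_offset(u64 start, unsigned alignment)
	{
		if (alignment) {
			u64 tmp = start;
			unsigned rem;

			/* do_div() divides tmp in place and returns the 32-bit remainder */
			rem = do_div(tmp, alignment);
			if (rem)
				start += alignment - rem;
		}

		return start;
	}

On x86-64 the generated division is effectively the same as before, matching
the pull message's note that the change only affects 32-bit kernels.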
@@ -91,29 +91,29 @@
 */

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-	unsigned long size,
+	u64 size,
	unsigned alignment,
	unsigned long color,
	enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
-	unsigned long size,
+	u64 size,
	unsigned alignment,
	unsigned long color,
-	unsigned long start,
-	unsigned long end,
+	u64 start,
+	u64 end,
	enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
	struct drm_mm_node *node,
-	unsigned long size, unsigned alignment,
+	u64 size, unsigned alignment,
	unsigned long color,
	enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
-	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-	unsigned long adj_start = hole_start;
-	unsigned long adj_end = hole_end;
+	u64 hole_start = drm_mm_hole_node_start(hole_node);
+	u64 hole_end = drm_mm_hole_node_end(hole_node);
+	u64 adj_start = hole_start;
+	u64 adj_end = hole_end;

	BUG_ON(node->allocated);
@@ -124,12 +124,15 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
		adj_start = adj_end - size;

	if (alignment) {
-		unsigned tmp = adj_start % alignment;
-		if (tmp) {
+		u64 tmp = adj_start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
+		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= tmp;
+				adj_start -= rem;
			else
-				adj_start += alignment - tmp;
+				adj_start += alignment - rem;
		}
	}
@@ -176,9 +179,9 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
-	unsigned long end = node->start + node->size;
-	unsigned long hole_start;
-	unsigned long hole_end;
+	u64 end = node->start + node->size;
+	u64 hole_start;
+	u64 hole_end;

	BUG_ON(node == NULL);

@@ -227,7 +230,7 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
-	unsigned long size, unsigned alignment,
+	u64 size, unsigned alignment,
	unsigned long color,
	enum drm_mm_search_flags sflags,
	enum drm_mm_allocator_flags aflags)
@@ -246,16 +249,16 @@ EXPORT_SYMBOL(drm_mm_insert_node_generic);

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
	struct drm_mm_node *node,
-	unsigned long size, unsigned alignment,
+	u64 size, unsigned alignment,
	unsigned long color,
-	unsigned long start, unsigned long end,
+	u64 start, u64 end,
	enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
-	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
-	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
-	unsigned long adj_start = hole_start;
-	unsigned long adj_end = hole_end;
+	u64 hole_start = drm_mm_hole_node_start(hole_node);
+	u64 hole_end = drm_mm_hole_node_end(hole_node);
+	u64 adj_start = hole_start;
+	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);
@@ -271,12 +274,15 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
-		unsigned tmp = adj_start % alignment;
-		if (tmp) {
+		u64 tmp = adj_start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
+		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
-				adj_start -= tmp;
+				adj_start -= rem;
			else
-				adj_start += alignment - tmp;
+				adj_start += alignment - rem;
		}
	}
@@ -324,9 +330,9 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
-	unsigned long size, unsigned alignment,
+	u64 size, unsigned alignment,
	unsigned long color,
-	unsigned long start, unsigned long end,
+	u64 start, u64 end,
	enum drm_mm_search_flags sflags,
	enum drm_mm_allocator_flags aflags)
{
@@ -387,32 +393,34 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);

-static int check_free_hole(unsigned long start, unsigned long end,
-	unsigned long size, unsigned alignment)
+static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
-		unsigned tmp = start % alignment;
+		u64 tmp = start;
+		unsigned rem;
+
+		rem = do_div(tmp, alignment);
		if (tmp)
-			start += alignment - tmp;
+			start += alignment - rem;
	}

	return end >= start + size;
}

static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
-	unsigned long size,
+	u64 size,
	unsigned alignment,
	unsigned long color,
	enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
-	unsigned long adj_start;
-	unsigned long adj_end;
-	unsigned long best_size;
+	u64 adj_start;
+	u64 adj_end;
+	u64 best_size;

	BUG_ON(mm->scanned_blocks);
@ -421,7 +429,7 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
|
||||
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
|
||||
flags & DRM_MM_SEARCH_BELOW) {
|
||||
unsigned long hole_size = adj_end - adj_start;
|
||||
u64 hole_size = adj_end - adj_start;
|
||||
|
||||
if (mm->color_adjust) {
|
||||
mm->color_adjust(entry, color, &adj_start, &adj_end);
|
||||
@ -445,18 +453,18 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
|
||||
}
|
||||
|
||||
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start,
|
||||
unsigned long end,
|
||||
u64 start,
|
||||
u64 end,
|
||||
enum drm_mm_search_flags flags)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
struct drm_mm_node *best;
|
||||
unsigned long adj_start;
|
||||
unsigned long adj_end;
|
||||
unsigned long best_size;
|
||||
u64 adj_start;
|
||||
u64 adj_end;
|
||||
u64 best_size;
|
||||
|
||||
BUG_ON(mm->scanned_blocks);
|
||||
|
||||
@ -465,7 +473,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
|
||||
|
||||
__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
|
||||
flags & DRM_MM_SEARCH_BELOW) {
|
||||
unsigned long hole_size = adj_end - adj_start;
|
||||
u64 hole_size = adj_end - adj_start;
|
||||
|
||||
if (adj_start < start)
|
||||
adj_start = start;
|
||||
@ -561,7 +569,7 @@ EXPORT_SYMBOL(drm_mm_replace_node);
|
||||
* adding/removing nodes to/from the scan list are allowed.
|
||||
*/
|
||||
void drm_mm_init_scan(struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color)
|
||||
{
|
||||
@ -594,11 +602,11 @@ EXPORT_SYMBOL(drm_mm_init_scan);
|
||||
* adding/removing nodes to/from the scan list are allowed.
|
||||
*/
|
||||
void drm_mm_init_scan_with_range(struct drm_mm *mm,
|
||||
unsigned long size,
|
||||
u64 size,
|
||||
unsigned alignment,
|
||||
unsigned long color,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
u64 start,
|
||||
u64 end)
|
||||
{
|
||||
mm->scan_color = color;
|
||||
mm->scan_alignment = alignment;
|
||||
@ -627,8 +635,8 @@ bool drm_mm_scan_add_block(struct drm_mm_node *node)
|
||||
{
|
||||
struct drm_mm *mm = node->mm;
|
||||
struct drm_mm_node *prev_node;
|
||||
unsigned long hole_start, hole_end;
|
||||
unsigned long adj_start, adj_end;
|
||||
u64 hole_start, hole_end;
|
||||
u64 adj_start, adj_end;
|
||||
|
||||
mm->scanned_blocks++;
|
||||
|
||||
@ -731,7 +739,7 @@ EXPORT_SYMBOL(drm_mm_clean);
|
||||
*
|
||||
* Note that @mm must be cleared to 0 before calling this function.
|
||||
*/
|
||||
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
|
||||
void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
|
||||
{
|
||||
INIT_LIST_HEAD(&mm->hole_stack);
|
||||
mm->scanned_blocks = 0;
|
||||
@ -766,18 +774,17 @@ void drm_mm_takedown(struct drm_mm * mm)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_takedown);
|
||||
|
||||
static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
const char *prefix)
|
||||
static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
const char *prefix)
|
||||
{
|
||||
unsigned long hole_start, hole_end, hole_size;
|
||||
u64 hole_start, hole_end, hole_size;
|
||||
|
||||
if (entry->hole_follows) {
|
||||
hole_start = drm_mm_hole_node_start(entry);
|
||||
hole_end = drm_mm_hole_node_end(entry);
|
||||
hole_size = hole_end - hole_start;
|
||||
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
|
||||
prefix, hole_start, hole_end,
|
||||
hole_size);
|
||||
pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
|
||||
hole_end, hole_size);
|
||||
return hole_size;
|
||||
}
|
||||
|
||||
@ -792,35 +799,34 @@ static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
|
||||
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
unsigned long total_used = 0, total_free = 0, total = 0;
|
||||
u64 total_used = 0, total_free = 0, total = 0;
|
||||
|
||||
total_free += drm_mm_debug_hole(&mm->head_node, prefix);
|
||||
|
||||
drm_mm_for_each_node(entry, mm) {
|
||||
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
|
||||
prefix, entry->start, entry->start + entry->size,
|
||||
entry->size);
|
||||
pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
|
||||
entry->start + entry->size, entry->size);
|
||||
total_used += entry->size;
|
||||
total_free += drm_mm_debug_hole(entry, prefix);
|
||||
}
|
||||
total = total_free + total_used;
|
||||
|
||||
printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
|
||||
total_used, total_free);
|
||||
pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
|
||||
total_used, total_free);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_debug_table);
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
|
||||
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
|
||||
{
|
||||
unsigned long hole_start, hole_end, hole_size;
|
||||
u64 hole_start, hole_end, hole_size;
|
||||
|
||||
if (entry->hole_follows) {
|
||||
hole_start = drm_mm_hole_node_start(entry);
|
||||
hole_end = drm_mm_hole_node_end(entry);
|
||||
hole_size = hole_end - hole_start;
|
||||
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
|
||||
hole_start, hole_end, hole_size);
|
||||
seq_printf(m, "%#llx-%#llx: %llu: free\n", hole_start,
|
||||
hole_end, hole_size);
|
||||
return hole_size;
|
||||
}
|
||||
|
||||
@ -835,20 +841,20 @@ static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *en
|
||||
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
|
||||
{
|
||||
struct drm_mm_node *entry;
|
||||
unsigned long total_used = 0, total_free = 0, total = 0;
|
||||
u64 total_used = 0, total_free = 0, total = 0;
|
||||
|
||||
total_free += drm_mm_dump_hole(m, &mm->head_node);
|
||||
|
||||
drm_mm_for_each_node(entry, mm) {
|
||||
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
|
||||
entry->start, entry->start + entry->size,
|
||||
entry->size);
|
||||
seq_printf(m, "%#016llx-%#016llx: %llu: used\n", entry->start,
|
||||
entry->start + entry->size, entry->size);
|
||||
total_used += entry->size;
|
||||
total_free += drm_mm_dump_hole(m, entry);
|
||||
}
|
||||
total = total_free + total_used;
|
||||
|
||||
seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
|
||||
seq_printf(m, "total: %llu, used %llu free %llu\n", total,
|
||||
total_used, total_free);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_mm_dump_table);
|
||||
|
@ -152,12 +152,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
|
||||
seq_puts(m, " (pp");
|
||||
else
|
||||
seq_puts(m, " (g");
|
||||
seq_printf(m, "gtt offset: %08lx, size: %08lx, type: %u)",
|
||||
seq_printf(m, "gtt offset: %08llx, size: %08llx, type: %u)",
|
||||
vma->node.start, vma->node.size,
|
||||
vma->ggtt_view.type);
|
||||
}
|
||||
if (obj->stolen)
|
||||
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
|
||||
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
|
||||
if (obj->pin_mappable || obj->fault_mappable) {
|
||||
char s[3], *t = s;
|
||||
if (obj->pin_mappable)
|
||||
|
@ -622,7 +622,7 @@ static int i915_drm_suspend(struct drm_device *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i915_drm_suspend_late(struct drm_device *drm_dev)
|
||||
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = drm_dev->dev_private;
|
||||
int ret;
|
||||
@ -636,7 +636,17 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev)
|
||||
}
|
||||
|
||||
pci_disable_device(drm_dev->pdev);
|
||||
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
||||
/*
|
||||
* During hibernation on some GEN4 platforms the BIOS may try to access
|
||||
* the device even though it's already in D3 and hang the machine. So
|
||||
* leave the device in D0 on those platforms and hope the BIOS will
|
||||
* power down the device properly. Platforms where this was seen:
|
||||
* Lenovo Thinkpad X301, X61s
|
||||
*/
|
||||
if (!(hibernation &&
|
||||
drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
|
||||
INTEL_INFO(dev_priv)->gen == 4))
|
||||
pci_set_power_state(drm_dev->pdev, PCI_D3hot);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -662,7 +672,7 @@ int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
return i915_drm_suspend_late(dev);
|
||||
return i915_drm_suspend_late(dev, false);
|
||||
}
|
||||
|
||||
static int i915_drm_resume(struct drm_device *dev)
|
||||
@ -950,7 +960,17 @@ static int i915_pm_suspend_late(struct device *dev)
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
|
||||
return i915_drm_suspend_late(drm_dev);
|
||||
return i915_drm_suspend_late(drm_dev, false);
|
||||
}
|
||||
|
||||
static int i915_pm_poweroff_late(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_to_i915(dev)->dev;
|
||||
|
||||
if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
|
||||
return 0;
|
||||
|
||||
return i915_drm_suspend_late(drm_dev, true);
|
||||
}
|
||||
|
||||
static int i915_pm_resume_early(struct device *dev)
|
||||
@ -1520,7 +1540,7 @@ static const struct dev_pm_ops i915_pm_ops = {
|
||||
.thaw_early = i915_pm_resume_early,
|
||||
.thaw = i915_pm_resume,
|
||||
.poweroff = i915_pm_suspend,
|
||||
.poweroff_late = i915_pm_suspend_late,
|
||||
.poweroff_late = i915_pm_poweroff_late,
|
||||
.restore_early = i915_pm_resume_early,
|
||||
.restore = i915_pm_resume,
|
||||
|
||||
|
@ -1145,7 +1145,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
|
||||
|
||||
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
|
||||
|
||||
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
|
||||
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
|
||||
ppgtt->node.size >> 20,
|
||||
ppgtt->node.start / PAGE_SIZE);
|
||||
|
||||
@ -1713,8 +1713,8 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
|
||||
|
||||
static void i915_gtt_color_adjust(struct drm_mm_node *node,
|
||||
unsigned long color,
|
||||
unsigned long *start,
|
||||
unsigned long *end)
|
||||
u64 *start,
|
||||
u64 *end)
|
||||
{
|
||||
if (node->color != color)
|
||||
*start += 4096;
|
||||
|
@ -282,16 +282,6 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static bool
|
||||
__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
return !intel_crtc->cpu_fifo_underrun_disabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
|
||||
* @dev_priv: i915 device instance
|
||||
@ -352,9 +342,15 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
|
||||
void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
|
||||
enum pipe pipe)
|
||||
{
|
||||
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
|
||||
|
||||
/* We may be called too early in init, thanks BIOS! */
|
||||
if (crtc == NULL)
|
||||
return;
|
||||
|
||||
/* GMCH can't disable fifo underruns, filter them. */
|
||||
if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
|
||||
!__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
|
||||
to_intel_crtc(crtc)->cpu_fifo_underrun_disabled)
|
||||
return;
|
||||
|
||||
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
|
||||
|
@ -70,7 +70,9 @@ static const struct dw_hdmi_curr_ctrl imx_cur_ctr[] = {
|
||||
118800000, { 0x091c, 0x091c, 0x06dc },
|
||||
}, {
|
||||
216000000, { 0x06dc, 0x0b5c, 0x091c },
|
||||
}
|
||||
}, {
|
||||
~0UL, { 0x0000, 0x0000, 0x0000 },
|
||||
},
|
||||
};
|
||||
|
||||
static const struct dw_hdmi_sym_term imx_sym_term[] = {
|
||||
@ -136,11 +138,34 @@ static struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
|
||||
.destroy = drm_encoder_cleanup,
|
||||
};
|
||||
|
||||
static enum drm_mode_status imx6q_hdmi_mode_valid(struct drm_connector *con,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
if (mode->clock < 13500)
|
||||
return MODE_CLOCK_LOW;
|
||||
if (mode->clock > 266000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static enum drm_mode_status imx6dl_hdmi_mode_valid(struct drm_connector *con,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
if (mode->clock < 13500)
|
||||
return MODE_CLOCK_LOW;
|
||||
if (mode->clock > 270000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
static struct dw_hdmi_plat_data imx6q_hdmi_drv_data = {
|
||||
.mpll_cfg = imx_mpll_cfg,
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6Q_HDMI,
|
||||
.mpll_cfg = imx_mpll_cfg,
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6Q_HDMI,
|
||||
.mode_valid = imx6q_hdmi_mode_valid,
|
||||
};
|
||||
|
||||
static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
|
||||
@ -148,6 +173,7 @@ static struct dw_hdmi_plat_data imx6dl_hdmi_drv_data = {
|
||||
.cur_ctr = imx_cur_ctr,
|
||||
.sym_term = imx_sym_term,
|
||||
.dev_type = IMX6DL_HDMI,
|
||||
.mode_valid = imx6dl_hdmi_mode_valid,
|
||||
};
|
||||
|
||||
static const struct of_device_id dw_hdmi_imx_dt_ids[] = {
|
||||
|
@ -163,22 +163,7 @@ static void imx_ldb_encoder_prepare(struct drm_encoder *encoder)
|
||||
{
|
||||
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
|
||||
struct imx_ldb *ldb = imx_ldb_ch->ldb;
|
||||
struct drm_display_mode *mode = &encoder->crtc->hwmode;
|
||||
u32 pixel_fmt;
|
||||
unsigned long serial_clk;
|
||||
unsigned long di_clk = mode->clock * 1000;
|
||||
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
|
||||
|
||||
if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
|
||||
/* dual channel LVDS mode */
|
||||
serial_clk = 3500UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
|
||||
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
|
||||
} else {
|
||||
serial_clk = 7000UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
|
||||
di_clk);
|
||||
}
|
||||
|
||||
switch (imx_ldb_ch->chno) {
|
||||
case 0:
|
||||
@ -247,6 +232,9 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
|
||||
struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
|
||||
struct imx_ldb *ldb = imx_ldb_ch->ldb;
|
||||
int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
|
||||
unsigned long serial_clk;
|
||||
unsigned long di_clk = mode->clock * 1000;
|
||||
int mux = imx_drm_encoder_get_mux_id(imx_ldb_ch->child, encoder);
|
||||
|
||||
if (mode->clock > 170000) {
|
||||
dev_warn(ldb->dev,
|
||||
@ -257,6 +245,16 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
|
||||
"%s: mode exceeds 85 MHz pixel clock\n", __func__);
|
||||
}
|
||||
|
||||
if (dual) {
|
||||
serial_clk = 3500UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, 0, serial_clk, di_clk);
|
||||
imx_ldb_set_clock(ldb, mux, 1, serial_clk, di_clk);
|
||||
} else {
|
||||
serial_clk = 7000UL * mode->clock;
|
||||
imx_ldb_set_clock(ldb, mux, imx_ldb_ch->chno, serial_clk,
|
||||
di_clk);
|
||||
}
|
||||
|
||||
/* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
|
||||
if (imx_ldb_ch == &ldb->channel[0]) {
|
||||
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
|
||||
|
@ -236,8 +236,11 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
|
||||
}
|
||||
|
||||
panel_node = of_parse_phandle(np, "fsl,panel", 0);
|
||||
if (panel_node)
|
||||
if (panel_node) {
|
||||
imxpd->panel = of_drm_find_panel(panel_node);
|
||||
if (!imxpd->panel)
|
||||
return -EPROBE_DEFER;
|
||||
}
|
||||
|
||||
imxpd->dev = dev;
|
||||
|
||||
|
@ -32,7 +32,10 @@ static void mdp4_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
void mdp4_irq_preinstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
|
||||
mdp4_enable(mdp4_kms);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
|
||||
mdp4_disable(mdp4_kms);
|
||||
}
|
||||
|
||||
int mdp4_irq_postinstall(struct msm_kms *kms)
|
||||
@ -53,7 +56,9 @@ int mdp4_irq_postinstall(struct msm_kms *kms)
|
||||
void mdp4_irq_uninstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
|
||||
mdp4_enable(mdp4_kms);
|
||||
mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
|
||||
mdp4_disable(mdp4_kms);
|
||||
}
|
||||
|
||||
irqreturn_t mdp4_irq(struct msm_kms *kms)
|
||||
|
@ -8,17 +8,9 @@ http://github.com/freedreno/envytools/
|
||||
git clone https://github.com/freedreno/envytools.git
|
||||
|
||||
The rules-ng-ng source files this header was generated from are:
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
|
||||
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 27229 bytes, from 2015-02-10 17:00:41)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
|
||||
- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-01-23 16:20:19)
|
||||
|
||||
Copyright (C) 2013-2015 by the following authors:
|
||||
- Rob Clark <robdclark@gmail.com> (robclark)
|
||||
@ -910,6 +902,7 @@ static inline uint32_t __offset_LM(uint32_t idx)
|
||||
case 2: return (mdp5_cfg->lm.base[2]);
|
||||
case 3: return (mdp5_cfg->lm.base[3]);
|
||||
case 4: return (mdp5_cfg->lm.base[4]);
|
||||
case 5: return (mdp5_cfg->lm.base[5]);
|
||||
default: return INVALID_IDX(idx);
|
||||
}
|
||||
}
|
||||
|
@ -62,8 +62,8 @@ struct mdp5_crtc {
|
||||
|
||||
/* current cursor being scanned out: */
|
||||
struct drm_gem_object *scanout_bo;
|
||||
uint32_t width;
|
||||
uint32_t height;
|
||||
uint32_t width, height;
|
||||
uint32_t x, y;
|
||||
} cursor;
|
||||
};
|
||||
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
|
||||
@ -103,8 +103,8 @@ static void crtc_flush_all(struct drm_crtc *crtc)
|
||||
struct drm_plane *plane;
|
||||
uint32_t flush_mask = 0;
|
||||
|
||||
/* we could have already released CTL in the disable path: */
|
||||
if (!mdp5_crtc->ctl)
|
||||
/* this should not happen: */
|
||||
if (WARN_ON(!mdp5_crtc->ctl))
|
||||
return;
|
||||
|
||||
drm_atomic_crtc_for_each_plane(plane, crtc) {
|
||||
@ -143,6 +143,11 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
|
||||
drm_atomic_crtc_for_each_plane(plane, crtc) {
|
||||
mdp5_plane_complete_flip(plane);
|
||||
}
|
||||
|
||||
if (mdp5_crtc->ctl && !crtc->state->enable) {
|
||||
mdp5_ctl_release(mdp5_crtc->ctl);
|
||||
mdp5_crtc->ctl = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
|
||||
@ -386,14 +391,17 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
|
||||
mdp5_crtc->event = crtc->state->event;
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
|
||||
/*
|
||||
* If no CTL has been allocated in mdp5_crtc_atomic_check(),
|
||||
* it means we are trying to flush a CRTC whose state is disabled:
|
||||
* nothing else needs to be done.
|
||||
*/
|
||||
if (unlikely(!mdp5_crtc->ctl))
|
||||
return;
|
||||
|
||||
blend_setup(crtc);
|
||||
crtc_flush_all(crtc);
|
||||
request_pending(crtc, PENDING_FLIP);
|
||||
|
||||
if (mdp5_crtc->ctl && !crtc->state->enable) {
|
||||
mdp5_ctl_release(mdp5_crtc->ctl);
|
||||
mdp5_crtc->ctl = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int mdp5_crtc_set_property(struct drm_crtc *crtc,
|
||||
@ -403,6 +411,32 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
|
||||
{
|
||||
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
|
||||
uint32_t xres = crtc->mode.hdisplay;
|
||||
uint32_t yres = crtc->mode.vdisplay;
|
||||
|
||||
/*
|
||||
* Cursor Region Of Interest (ROI) is a plane read from cursor
|
||||
* buffer to render. The ROI region is determined by the visibility of
|
||||
* the cursor point. In the default Cursor image the cursor point will
|
||||
* be at the top left of the cursor image, unless it is specified
|
||||
* otherwise using hotspot feature.
|
||||
*
|
||||
* If the cursor point reaches the right (xres - x < cursor.width) or
|
||||
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
|
||||
* width and ROI height need to be evaluated to crop the cursor image
|
||||
* accordingly.
|
||||
* (xres-x) will be new cursor width when x > (xres - cursor.width)
|
||||
* (yres-y) will be new cursor height when y > (yres - cursor.height)
|
||||
*/
|
||||
*roi_w = min(mdp5_crtc->cursor.width, xres -
|
||||
mdp5_crtc->cursor.x);
|
||||
*roi_h = min(mdp5_crtc->cursor.height, yres -
|
||||
mdp5_crtc->cursor.y);
|
||||
}
|
||||
|
||||
static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
struct drm_file *file, uint32_t handle,
|
||||
uint32_t width, uint32_t height)
|
||||
@ -416,6 +450,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
unsigned int depth;
|
||||
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
|
||||
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
|
||||
uint32_t roi_w, roi_h;
|
||||
unsigned long flags;
|
||||
|
||||
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
|
||||
@ -446,6 +481,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
|
||||
old_bo = mdp5_crtc->cursor.scanout_bo;
|
||||
|
||||
mdp5_crtc->cursor.scanout_bo = cursor_bo;
|
||||
mdp5_crtc->cursor.width = width;
|
||||
mdp5_crtc->cursor.height = height;
|
||||
|
||||
get_roi(crtc, &roi_w, &roi_h);
|
||||
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
|
||||
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
|
||||
@ -453,19 +494,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
|
||||
MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
|
||||
MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
|
||||
MDP5_LM_CURSOR_SIZE_ROI_H(height) |
|
||||
MDP5_LM_CURSOR_SIZE_ROI_W(width));
|
||||
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
|
||||
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
|
||||
|
||||
|
||||
blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
|
||||
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
|
||||
blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
|
||||
|
||||
mdp5_crtc->cursor.scanout_bo = cursor_bo;
|
||||
mdp5_crtc->cursor.width = width;
|
||||
mdp5_crtc->cursor.height = height;
|
||||
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
|
||||
|
||||
ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
|
||||
@ -489,31 +525,18 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
|
||||
struct mdp5_kms *mdp5_kms = get_kms(crtc);
|
||||
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
|
||||
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
|
||||
uint32_t xres = crtc->mode.hdisplay;
|
||||
uint32_t yres = crtc->mode.vdisplay;
|
||||
uint32_t roi_w;
|
||||
uint32_t roi_h;
|
||||
unsigned long flags;
|
||||
|
||||
x = (x > 0) ? x : 0;
|
||||
y = (y > 0) ? y : 0;
|
||||
/* In case the CRTC is disabled, just drop the cursor update */
|
||||
if (unlikely(!crtc->state->enable))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Cursor Region Of Interest (ROI) is a plane read from cursor
|
||||
* buffer to render. The ROI region is determined by the visiblity of
|
||||
* the cursor point. In the default Cursor image the cursor point will
|
||||
* be at the top left of the cursor image, unless it is specified
|
||||
* otherwise using hotspot feature.
|
||||
*
|
||||
* If the cursor point reaches the right (xres - x < cursor.width) or
|
||||
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
|
||||
* width and ROI height need to be evaluated to crop the cursor image
|
||||
* accordingly.
|
||||
* (xres-x) will be new cursor width when x > (xres - cursor.width)
|
||||
* (yres-y) will be new cursor height when y > (yres - cursor.height)
|
||||
*/
|
||||
roi_w = min(mdp5_crtc->cursor.width, xres - x);
|
||||
roi_h = min(mdp5_crtc->cursor.height, yres - y);
|
||||
mdp5_crtc->cursor.x = x = max(x, 0);
|
||||
mdp5_crtc->cursor.y = y = max(y, 0);
|
||||
|
||||
get_roi(crtc, &roi_w, &roi_h);
|
||||
|
||||
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
|
||||
@ -544,8 +567,8 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
|
||||
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
|
||||
.mode_fixup = mdp5_crtc_mode_fixup,
|
||||
.mode_set_nofb = mdp5_crtc_mode_set_nofb,
|
||||
.prepare = mdp5_crtc_disable,
|
||||
.commit = mdp5_crtc_enable,
|
||||
.disable = mdp5_crtc_disable,
|
||||
.enable = mdp5_crtc_enable,
|
||||
.atomic_check = mdp5_crtc_atomic_check,
|
||||
.atomic_begin = mdp5_crtc_atomic_begin,
|
||||
.atomic_flush = mdp5_crtc_atomic_flush,
|
||||
|
@ -267,14 +267,14 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
|
||||
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
|
||||
|
||||
mdp5_encoder->enabled = false;
|
||||
mdp5_encoder->enabled = true;
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
|
||||
.mode_fixup = mdp5_encoder_mode_fixup,
|
||||
.mode_set = mdp5_encoder_mode_set,
|
||||
.prepare = mdp5_encoder_disable,
|
||||
.commit = mdp5_encoder_enable,
|
||||
.disable = mdp5_encoder_disable,
|
||||
.enable = mdp5_encoder_enable,
|
||||
};
|
||||
|
||||
/* initialize encoder */
|
||||
|
@ -34,7 +34,10 @@ static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
|
||||
void mdp5_irq_preinstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
|
||||
mdp5_enable(mdp5_kms);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
|
||||
mdp5_disable(mdp5_kms);
|
||||
}
|
||||
|
||||
int mdp5_irq_postinstall(struct msm_kms *kms)
|
||||
@ -57,7 +60,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
|
||||
void mdp5_irq_uninstall(struct msm_kms *kms)
|
||||
{
|
||||
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
|
||||
mdp5_enable(mdp5_kms);
|
||||
mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
|
||||
mdp5_disable(mdp5_kms);
|
||||
}
|
||||
|
||||
static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
|
||||
|
@ -219,8 +219,10 @@ int msm_atomic_commit(struct drm_device *dev,
|
||||
* mark our set of crtc's as busy:
|
||||
*/
|
||||
ret = start_atomic(dev->dev_private, c->crtc_mask);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
kfree(c);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is the point of no return - everything below never fails except
|
||||
|
@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
|
||||
nouveau_fbcon_zfill(dev, fbcon);
|
||||
|
||||
/* To allow resizeing without swapping buffers */
|
||||
NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
|
||||
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
|
||||
nouveau_fb->base.width, nouveau_fb->base.height,
|
||||
nvbo->bo.offset, nvbo);
|
||||
|
||||
|
@ -1405,6 +1405,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
||||
(x << 16) | y);
|
||||
viewport_w = crtc->mode.hdisplay;
|
||||
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
||||
if ((rdev->family >= CHIP_BONAIRE) &&
|
||||
(crtc->mode.flags & DRM_MODE_FLAG_INTERLACE))
|
||||
viewport_h *= 2;
|
||||
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
||||
(viewport_w << 16) | viewport_h);
|
||||
|
||||
|
@ -1626,7 +1626,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
|
||||
struct radeon_connector *radeon_connector = NULL;
|
||||
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
|
||||
bool travis_quirk = false;
|
||||
int encoder_mode;
|
||||
|
||||
if (connector) {
|
||||
radeon_connector = to_radeon_connector(connector);
|
||||
@ -1722,13 +1721,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_dpms(encoder, mode);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1737,10 +1729,19 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
int encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
|
||||
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
|
||||
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
|
||||
radeon_encoder->active_device);
|
||||
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_dpms(encoder, mode);
|
||||
|
||||
switch (radeon_encoder->encoder_id) {
|
||||
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
|
||||
@ -2170,12 +2171,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
|
||||
/* handled in dpms */
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_mode_set(encoder, adjusted_mode);
|
||||
break;
|
||||
case ENCODER_OBJECT_ID_INTERNAL_DDI:
|
||||
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
|
||||
@ -2197,6 +2192,13 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
atombios_apply_encoder_quirks(encoder, adjusted_mode);
|
||||
|
||||
encoder_mode = atombios_get_encoder_mode(encoder);
|
||||
if (connector && (radeon_audio != 0) &&
|
||||
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
|
||||
(ENCODER_MODE_IS_DP(encoder_mode) &&
|
||||
drm_detect_monitor_audio(radeon_connector_edid(connector)))))
|
||||
radeon_audio_mode_set(encoder, adjusted_mode);
|
||||
}
|
||||
|
||||
static bool
|
||||
|
@ -7555,6 +7555,9 @@ int cik_irq_set(struct radeon_device *rdev)
|
||||
WREG32(DC_HPD5_INT_CONTROL, hpd5);
|
||||
WREG32(DC_HPD6_INT_CONTROL, hpd6);
|
||||
|
||||
/* posting read */
|
||||
RREG32(SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -26,6 +26,9 @@
|
||||
#include "radeon_audio.h"
|
||||
#include "sid.h"
|
||||
|
||||
#define DCE8_DCCG_AUDIO_DTO1_PHASE 0x05b8
|
||||
#define DCE8_DCCG_AUDIO_DTO1_MODULE 0x05bc
|
||||
|
||||
u32 dce6_endpoint_rreg(struct radeon_device *rdev,
|
||||
u32 block_offset, u32 reg)
|
||||
{
|
||||
@ -252,72 +255,67 @@ void dce6_audio_enable(struct radeon_device *rdev,
|
||||
void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
/* Two dtos; generally use dto0 for HDMI */
|
||||
/* Two dtos; generally use dto0 for HDMI */
|
||||
u32 value = 0;
|
||||
|
||||
if (crtc)
|
||||
if (crtc)
|
||||
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
|
||||
|
||||
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
|
||||
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO0_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO0_MODULE, clock);
|
||||
}
|
||||
|
||||
void dce6_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
/* Two dtos; generally use dto1 for DP */
|
||||
/* Two dtos; generally use dto1 for DP */
|
||||
u32 value = 0;
|
||||
value |= DCCG_AUDIO_DTO_SEL;
|
||||
|
||||
if (crtc)
|
||||
if (crtc)
|
||||
value |= DCCG_AUDIO_DTO0_SOURCE_SEL(crtc->crtc_id);
|
||||
|
||||
WREG32(DCCG_AUDIO_DTO_SOURCE, value);
|
||||
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
/* Express [24MHz / target pixel clock] as an exact rational
|
||||
* number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
if (ASIC_IS_DCE8(rdev)) {
|
||||
WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
} else {
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
}
|
||||
}
|
||||
|
||||
void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
uint32_t offset;
|
||||
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
offset = dig->afmt->offset;
|
||||
|
||||
if (enable) {
|
||||
if (dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, true);
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
} else {
|
||||
if (!dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, false);
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
|
@ -4593,6 +4593,9 @@ int evergreen_irq_set(struct radeon_device *rdev)
|
||||
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
|
||||
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
|
||||
|
||||
/* posting read */
|
||||
RREG32(SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -272,7 +272,7 @@ void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
|
||||
}
|
||||
|
||||
void dce4_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
struct radeon_crtc *crtc, unsigned int clock)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
@ -294,7 +294,7 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
|
||||
* is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
|
||||
*/
|
||||
WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, rdev->clock.max_pixel_clock * 10);
|
||||
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
|
||||
}
|
||||
|
||||
void dce4_set_vbi_packet(struct drm_encoder *encoder, u32 offset)
|
||||
@ -350,20 +350,9 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
|
||||
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
|
||||
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
|
||||
|
||||
WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
|
||||
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
|
||||
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
|
||||
|
||||
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
|
||||
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
|
||||
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
|
||||
|
||||
WREG32(AFMT_60958_0 + offset,
|
||||
AFMT_60958_CS_CHANNEL_NUMBER_L(1));
|
||||
|
||||
@ -408,15 +397,19 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||
if (enable && dig->afmt->enabled)
|
||||
return;
|
||||
if (!enable && !dig->afmt->enabled)
|
||||
return;
|
||||
if (enable) {
|
||||
WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
|
||||
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
|
||||
|
||||
if (!enable && dig->afmt->pin) {
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
dig->afmt->pin = NULL;
|
||||
WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
|
||||
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
|
||||
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
|
||||
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
|
||||
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
|
||||
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
|
||||
} else {
|
||||
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
@ -425,33 +418,28 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
enable ? "En" : "Dis", dig->afmt->offset, radeon_encoder->encoder_id);
|
||||
}
|
||||
|
||||
void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
uint32_t offset;
|
||||
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
offset = dig->afmt->offset;
|
||||
|
||||
if (enable) {
|
||||
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
|
||||
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
|
||||
struct radeon_connector_atom_dig *dig_connector;
|
||||
uint32_t val;
|
||||
|
||||
if (dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + offset, EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
|
||||
|
||||
if (radeon_connector->con_priv) {
|
||||
dig_connector = radeon_connector->con_priv;
|
||||
val = RREG32(EVERGREEN_DP_SEC_AUD_N + offset);
|
||||
val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
|
||||
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
|
||||
|
||||
if (dig_connector->dp_clock == 162000)
|
||||
@ -459,21 +447,16 @@ void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable)
|
||||
else
|
||||
val |= EVERGREEN_DP_SEC_N_BASE_MULTIPLE(5);
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_AUD_N + offset, val);
|
||||
WREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset, val);
|
||||
}
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset,
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
|
||||
EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
|
||||
EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
|
||||
EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
|
||||
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
|
||||
} else {
|
||||
if (!dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + offset, 0);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
|
||||
}
|
||||
|
||||
dig->afmt->enabled = enable;
|
||||
|
@ -728,6 +728,10 @@ int r100_irq_set(struct radeon_device *rdev)
|
||||
tmp |= RADEON_FP2_DETECT_MASK;
|
||||
}
|
||||
WREG32(RADEON_GEN_INT_CNTL, tmp);
|
||||
|
||||
/* read back to post the write */
|
||||
RREG32(RADEON_GEN_INT_CNTL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3784,6 +3784,9 @@ int r600_irq_set(struct radeon_device *rdev)
|
||||
WREG32(RV770_CG_THERMAL_INT, thermal_int);
|
||||
}
|
||||
|
||||
/* posting read */
|
||||
RREG32(R_000E50_SRBM_STATUS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -476,17 +476,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
/* Silent, r600_hdmi_enable will raise WARN for us */
|
||||
if (enable && dig->afmt->enabled)
|
||||
return;
|
||||
if (!enable && !dig->afmt->enabled)
|
||||
return;
|
||||
|
||||
if (!enable && dig->afmt->pin) {
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
dig->afmt->pin = NULL;
|
||||
}
|
||||
|
||||
/* Older chipsets require setting HDMI and routing manually */
|
||||
if (!ASIC_IS_DCE3(rdev)) {
|
||||
if (enable)
|
||||
|
@ -101,8 +101,8 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
|
||||
void dce6_enable_dp_audio_packets(struct drm_encoder *encoder, bool enable);
|
||||
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
|
||||
void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
|
||||
|
||||
static const u32 pin_offsets[7] =
|
||||
{
|
||||
@ -210,7 +210,7 @@ static struct radeon_audio_funcs dce4_dp_funcs = {
|
||||
.set_avi_packet = evergreen_set_avi_packet,
|
||||
.set_audio_packet = dce4_set_audio_packet,
|
||||
.mode_set = radeon_audio_dp_mode_set,
|
||||
.dpms = evergreen_enable_dp_audio_packets,
|
||||
.dpms = evergreen_dp_enable,
|
||||
};
|
||||
|
||||
static struct radeon_audio_funcs dce6_hdmi_funcs = {
|
||||
@ -240,7 +240,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
|
||||
.set_avi_packet = evergreen_set_avi_packet,
|
||||
.set_audio_packet = dce4_set_audio_packet,
|
||||
.mode_set = radeon_audio_dp_mode_set,
|
||||
.dpms = dce6_enable_dp_audio_packets,
|
||||
.dpms = dce6_dp_enable,
|
||||
};
|
||||
|
||||
static void radeon_audio_interface_init(struct radeon_device *rdev)
|
||||
@ -452,7 +452,7 @@ void radeon_audio_enable(struct radeon_device *rdev,
|
||||
}
|
||||
|
||||
void radeon_audio_detect(struct drm_connector *connector,
|
||||
enum drm_connector_status status)
|
||||
enum drm_connector_status status)
|
||||
{
|
||||
struct radeon_device *rdev;
|
||||
struct radeon_encoder *radeon_encoder;
|
||||
@ -483,14 +483,11 @@ void radeon_audio_detect(struct drm_connector *connector,
|
||||
else
|
||||
radeon_encoder->audio = rdev->audio.hdmi_funcs;
|
||||
|
||||
radeon_audio_write_speaker_allocation(connector->encoder);
|
||||
radeon_audio_write_sad_regs(connector->encoder);
|
||||
if (connector->encoder->crtc)
|
||||
radeon_audio_write_latency_fields(connector->encoder,
|
||||
&connector->encoder->crtc->mode);
|
||||
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
|
||||
} else {
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
dig->afmt->pin = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
@ -694,23 +691,22 @@ static void radeon_audio_set_mute(struct drm_encoder *encoder, bool mute)
|
||||
* update the info frames with the data from the current display mode
|
||||
*/
|
||||
static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *mode)
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct radeon_device *rdev = encoder->dev->dev_private;
|
||||
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
|
||||
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
|
||||
|
||||
if (!dig || !dig->afmt)
|
||||
return;
|
||||
|
||||
/* disable audio prior to setting up hw */
|
||||
dig->afmt->pin = radeon_audio_get_pin(encoder);
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0);
|
||||
radeon_audio_set_mute(encoder, true);
|
||||
|
||||
radeon_audio_write_speaker_allocation(encoder);
|
||||
radeon_audio_write_sad_regs(encoder);
|
||||
radeon_audio_write_latency_fields(encoder, mode);
|
||||
radeon_audio_set_dto(encoder, mode->clock);
|
||||
radeon_audio_set_vbi_packet(encoder);
|
||||
radeon_hdmi_set_color_depth(encoder);
|
||||
radeon_audio_set_mute(encoder, false);
|
||||
radeon_audio_update_acr(encoder, mode->clock);
|
||||
radeon_audio_set_audio_packet(encoder);
|
||||
radeon_audio_select_pin(encoder);
|
||||
@ -718,8 +714,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
|
||||
if (radeon_audio_set_avi_packet(encoder, mode) < 0)
|
||||
return;
|
||||
|
||||
/* enable audio after to setting up hw */
|
||||
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
|
||||
radeon_audio_set_mute(encoder, false);
|
||||
}
|
||||
|
||||
static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
|
||||
@ -729,23 +724,26 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;

if (!dig || !dig->afmt)
return;

/* disable audio prior to setting up hw */
dig->afmt->pin = radeon_audio_get_pin(encoder);
radeon_audio_enable(rdev, dig->afmt->pin, 0);

radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
radeon_audio_write_speaker_allocation(encoder);
radeon_audio_write_sad_regs(encoder);
radeon_audio_write_latency_fields(encoder, mode);
if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
else
radeon_audio_set_dto(encoder, dig_connector->dp_clock);
radeon_audio_set_audio_packet(encoder);
radeon_audio_select_pin(encoder);

if (radeon_audio_set_avi_packet(encoder, mode) < 0)
return;

/* enable audio after to setting up hw */
radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
}

void radeon_audio_mode_set(struct drm_encoder *encoder,

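The DP hunk above changes which clock feeds the audio DTO: boards with an external DP reference clock, and DCE5 parts, keep using the display clock, while the remaining DP paths now derive audio timing from the DP link clock. A hedged sketch of that selection, with simplified stand-in types rather than the radeon structures:

#include <stdbool.h>

struct demo_dp_audio_clk {
	bool has_dp_extclk;		/* stands in for rdev->clock.dp_extclk */
	bool is_dce5;			/* stands in for ASIC_IS_DCE5(rdev) */
	unsigned int dispclk;		/* display clock, driver units */
	unsigned int dp_link_clock;	/* stands in for dig_connector->dp_clock */
};

static unsigned int demo_pick_dto_clock(const struct demo_dp_audio_clk *c)
{
	if (c->has_dp_extclk || c->is_dce5)
		return c->dispclk;	/* external reference or DCE5: use the display clock */
	return c->dp_link_clock;	/* otherwise: use the DP link clock */
}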
@ -256,11 +256,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
u32 ring = RADEON_CS_RING_GFX;
s32 priority = 0;

INIT_LIST_HEAD(&p->validated);

if (!cs->num_chunks) {
return 0;
}

/* get chunks */
INIT_LIST_HEAD(&p->validated);
p->idx = 0;
p->ib.sa_bo = NULL;
p->const_ib.sa_bo = NULL;

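The hunk above moves the list-head initialisation ahead of the early return. A minimal, hedged sketch (not the radeon code) of why that matters: if the parser bails out with the list head still uninitialised, a later cleanup pass that walks the list dereferences garbage pointers and oopses.

#include <linux/list.h>

struct demo_parser {
	struct list_head validated;
};

static int demo_parser_init(struct demo_parser *p, unsigned int num_chunks)
{
	INIT_LIST_HEAD(&p->validated);	/* safe to walk no matter how we exit */

	if (!num_chunks)
		return 0;		/* early success: nothing queued */

	/* ... build up p->validated ... */
	return 0;
}

static void demo_parser_fini(struct demo_parser *p)
{
	struct list_head *entry, *tmp;

	/* would crash on an uninitialised head; harmless on an empty one */
	list_for_each_safe(entry, tmp, &p->validated)
		list_del(entry);
}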
@ -694,6 +694,10 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);

/* posting read */
RREG32(R_000040_GEN_INT_CNTL);

return 0;
}

@ -6203,6 +6203,9 @@ int si_irq_set(struct radeon_device *rdev)

WREG32(CG_THERMAL_INT, thermal_int);

/* posting read */
RREG32(SRBM_STATUS);

return 0;
}

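Both hunks above add the same "posting read" idiom. A hedged illustration of the pattern, with made-up register names (only the ordering matters): MMIO writes may be posted (buffered) by the bus, so reading any register of the same device forces the interrupt-enable write to reach the hardware before the function returns.

#include <linux/io.h>

#define DEMO_REG_INT_CNTL	0x0040	/* hypothetical interrupt enable register */
#define DEMO_REG_STATUS		0x0e50	/* hypothetical status register */

static void demo_irq_set(void __iomem *mmio, u32 enable_bits)
{
	writel(enable_bits, mmio + DEMO_REG_INT_CNTL);	/* may sit in a write buffer */
	readl(mmio + DEMO_REG_STATUS);			/* posting read flushes it out */
}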
@ -912,8 +912,8 @@

#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO1_PHASE 0x05b8
#define DCCG_AUDIO_DTO1_MODULE 0x05bc
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4

#define AFMT_AUDIO_SRC_CONTROL 0x713c
#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)

@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
pr_err(" has_type: %d\n", man->has_type);
pr_err(" use_type: %d\n", man->use_type);
pr_err(" flags: 0x%08X\n", man->flags);
pr_err(" gpu_offset: 0x%08lX\n", man->gpu_offset);
pr_err(" gpu_offset: 0x%08llX\n", man->gpu_offset);
pr_err(" size: %llu\n", man->size);
pr_err(" available_caching: 0x%08X\n", man->available_caching);
pr_err(" default_caching: 0x%08X\n", man->default_caching);

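The one-line format change above goes with gpu_offset becoming 64-bit: "%lX" matches unsigned long, whose width varies with the kernel's word size, so a fixed-width 64-bit field needs a long-long conversion. A small hedged sketch of the same point, using an illustrative struct rather than the TTM one:

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_manager {
	uint64_t gpu_offset;
};

static void demo_dump(const struct demo_manager *man)
{
	/* "%08lX" would be wrong for a 64-bit field on a 32-bit kernel;
	 * print it as unsigned long long instead */
	pr_err("    gpu_offset: 0x%08llX\n",
	       (unsigned long long)man->gpu_offset);
}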
@ -459,6 +459,8 @@ static void ipu_di_config_clock(struct ipu_di *di,

clkrate = clk_get_rate(di->clk_ipu);
div = DIV_ROUND_CLOSEST(clkrate, sig->mode.pixelclock);
if (div == 0)
div = 1;
rate = clkrate / div;

error = rate / (sig->mode.pixelclock / 1000);

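The two added lines above guard against a zero divider: if the requested pixel clock is more than roughly twice the IPU clock, DIV_ROUND_CLOSEST() rounds down to 0 and the following division would trap, so the divider is clamped to at least 1. A minimal sketch of the clamp, with hypothetical names:

#include <linux/kernel.h>

static unsigned long demo_di_rate(unsigned long clkrate, unsigned long pixelclock)
{
	unsigned long div = DIV_ROUND_CLOSEST(clkrate, pixelclock);

	if (div == 0)
		div = 1;		/* avoid dividing by zero below */

	return clkrate / div;		/* closest rate the divider can produce */
}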
@ -68,8 +68,8 @@ struct drm_mm_node {
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
unsigned long start;
unsigned long size;
u64 start;
u64 size;
struct drm_mm *mm;
};

@ -82,16 +82,16 @@ struct drm_mm {
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned long scan_hit_end;
u64 scan_size;
u64 scan_hit_start;
u64 scan_hit_end;
unsigned scanned_blocks;
unsigned long scan_start;
unsigned long scan_end;
u64 scan_start;
u64 scan_end;
struct drm_mm_node *prev_scanned_node;

void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
unsigned long *start, unsigned long *end);
u64 *start, u64 *end);
};

/**
@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
return mm->hole_stack.next;
}

static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
* Returns:
* Start of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
BUG_ON(!hole_node->hole_follows);
return __drm_mm_hole_node_start(hole_node);
}

static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return list_entry(hole_node->node_list.next,
struct drm_mm_node, node_list)->start;
@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
* Returns:
* End of the subsequent hole.
*/
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
return __drm_mm_hole_node_end(hole_node);
}
@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
enum drm_mm_search_flags sflags,
@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
*/
static inline int drm_mm_insert_node(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
enum drm_mm_search_flags flags)
{
@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags sflags,
enum drm_mm_allocator_flags aflags);
/**
@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
*/
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long start,
unsigned long end,
u64 start,
u64 end,
enum drm_mm_search_flags flags)
{
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
u64 start,
u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
u64 size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
u64 start,
u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

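A hedged usage sketch of the u64-based interface declared above: with the range parameters widened to u64, a manager can describe a GPU address space larger than 4 GiB even on a 32-bit kernel. Error handling and locking are omitted; this is not taken from any driver.

#include <drm/drm_mm.h>

static int demo_place_buffer(struct drm_mm *mm, struct drm_mm_node *node)
{
	/* manage an 8 GiB GPU address range starting at 0 */
	drm_mm_init(mm, 0, 8ULL << 30);

	/* put a 64 KiB allocation somewhere in the upper half of that range */
	return drm_mm_insert_node_in_range(mm, node, 64 * 1024, 0,
					   4ULL << 30, 8ULL << 30,
					   DRM_MM_SEARCH_DEFAULT);
}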
@ -249,7 +249,7 @@ struct ttm_buffer_object {
* either of these locks held.
*/

unsigned long offset;
uint64_t offset; /* GPU address space is independent of CPU word size */
uint32_t cur_placement;

struct sg_table *sg;

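A standalone, hedged illustration of the bug class the offset and gpu_offset changes address: on a 32-bit kernel 'unsigned long' is only 32 bits wide, so a GPU offset past 4 GiB is silently truncated, while a fixed 64-bit type keeps the full value regardless of CPU word size. This is an ordinary userspace program, not driver code; build it for a 32-bit target to see the effect.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpu_offset = 5ULL << 30;		/* an object 5 GiB into VRAM */
	unsigned long narrowed = (unsigned long)gpu_offset;

	printf("uint64_t      : %llu\n", (unsigned long long)gpu_offset);
	printf("unsigned long : %lu\n", narrowed);	/* 1 GiB when long is 32-bit */
	return 0;
}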
@ -277,7 +277,7 @@ struct ttm_mem_type_manager {
bool has_type;
bool use_type;
uint32_t flags;
unsigned long gpu_offset;
uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
uint64_t size;
uint32_t available_caching;
uint32_t default_caching;