drm-misc-next for $kernel-version:

UAPI Changes:
 
 Cross-subsystem Changes:
   - dma-buf: Avoid a warning with some allocations, Remove
     DMA_FENCE_TRACE macros
 
 Core Changes:
   - bridge: New helper to get rid of panels in drivers
   - fence: Improve dma_fence_add_callback documentation, Improve
     dma_fence_ops->wait documentation
   - ioctl: Unexport drm_ioctl_permit
   - lease: Documentation improvements
   - fourcc: Add new macro to determine the modifier vendor
   - quirks: Add the Steam Deck, Chuwi HiBook, Chuwi Hi10 Pro, Samsung
     Galaxy Book 10.6, KD Kurio Smart C15200 2-in-1, Lenovo Ideapad D330
   - resv: Improve the documentation
   - shmem-helpers: Allocate WC pages on x86, Switch to vmf_insert_pfn
   - sched: Fix for a timer being canceled too soon, Avoid null pointer
     dereference if the fence is null in drm_sched_fence_free, Convert
     drivers to rely on its dependency tracking
   - ttm: Switch to kerneldoc, new helper to clear all DMA mappings, pool
     shrinker optimization, Remove ttm_tt_destroy_common, Fix for
     unbinding on multiple drivers
 
 Driver Changes:
   - bochs: New PCI IDs
   - msm: Fence ordering improvements
   - stm: Add layer alpha support, zpos
   - v3d: Fix for a Vulkan CTS failure
   - vc4: Conversion to the new bridge helpers
   - vgem: Use shmem helpers
   - virtio: Support mapping exported vram
   - zte: Remove obsolete driver
 
   - bridge: Probe improvements for it66121, enable DSI EOTP for anx7625,
     error propagation improvements for anx7625
 
   - panels: 60fps mode for otm8009a, New driver for Samsung S6D27A1
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQRcEzekXsqa64kGDp7j7w1vZxhRxQUCYULyqgAKCRDj7w1vZxhR
 xVR1AP96dB3rfB0uIEvujMROBqupaKbYvP/7qilfMGIwLotDqQD/RKNB+EAaoHtT
 hRA7zmz7kwYA/l8PihmF1zoFddX21gA=
 =nFnK
 -----END PGP SIGNATURE-----

Merge tag 'drm-misc-next-2021-09-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for $kernel-version:

UAPI Changes:

Cross-subsystem Changes:
  - dma-buf: Avoid a warning with some allocations, Remove
    DMA_FENCE_TRACE macros

Core Changes:
  - bridge: New helper to get rid of panels in drivers
  - fence: Improve dma_fence_add_callback documentation, Improve
    dma_fence_ops->wait documentation
  - ioctl: Unexport drm_ioctl_permit
  - lease: Documentation improvements
  - fourcc: Add new macro to determine the modifier vendor
  - quirks: Add the Steam Deck, Chuwi HiBook, Chuwi Hi10 Pro, Samsung
    Galaxy Book 10.6, KD Kurio Smart C15200 2-in-1, Lenovo Ideapad D330
  - resv: Improve the documentation
  - shmem-helpers: Allocate WC pages on x86, Switch to vmf_insert_pfn
  - sched: Fix for a timer being canceled too soon, Avoid null pointer
    dereference if the fence is null in drm_sched_fence_free, Convert
    drivers to rely on its dependency tracking
  - ttm: Switch to kerneldoc, new helper to clear all DMA mappings, pool
    shrinker optimization, Remove ttm_tt_destroy_common, Fix for
    unbinding on multiple drivers

Driver Changes:
  - bochs: New PCI IDs
  - msm: Fence ordering improvements
  - stm: Add layer alpha support, zpos
  - v3d: Fix for a Vulkan CTS failure
  - vc4: Conversion to the new bridge helpers
  - vgem: Use shmem helpers
  - virtio: Support mapping exported vram
  - zte: Remove obsolete driver

  - bridge: Probe improvements for it66121, enable DSI EOTP for anx7625,
    error propagation improvements for anx7625

  - panels: 60fps mode for otm8009a, New driver for Samsung S6D27A1

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Thu 16 Sep 2021 17:30:50 AEST
# gpg:                using EDDSA key 5C1337A45ECA9AEB89060E9EE3EF0D6F671851C5
# gpg: Can't check signature: No public key
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210916073132.ptbbmjetm7v3ufq3@gilmour
commit 0dfc70818a by Dave Airlie, 2021-09-22 15:30:38 +10:00
145 changed files with 2080 additions and 5441 deletions

View File

@ -22,7 +22,7 @@ properties:
items:
- enum:
# ili9341 240*320 Color on stm32f429-disco board
- st,sf-tc240t-9370-t
- st,sf-tc240t-9370-t
- const: ilitek,ili9341
reg: true

View File

@ -0,0 +1,98 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,s6d27a1.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung S6D27A1 display panel
description: The S6D27A1 is a 480x800 DPI display panel from Samsung Mobile
Displays (SMD). The panel must obey the rules for a SPI slave device
as specified in spi/spi-controller.yaml
maintainers:
- Markuss Broks <markuss.broks@gmail.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: samsung,s6d27a1
reg: true
interrupts:
description: provides an optional ESD (electrostatic discharge)
interrupt that signals abnormalities in the display hardware.
This can also be raised for other reasons like erroneous
configuration.
maxItems: 1
reset-gpios: true
vci-supply:
description: regulator that supplies the VCI analog voltage
usually around 3.0 V
vccio-supply:
description: regulator that supplies the VCCIO voltage usually
around 1.8 V
backlight: true
spi-cpha: true
spi-cpol: true
spi-max-frequency:
maximum: 1200000
port: true
required:
- compatible
- reg
- vci-supply
- vccio-supply
- spi-cpha
- spi-cpol
- port
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
spi {
compatible = "spi-gpio";
sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,s6d27a1";
spi-max-frequency = <1200000>;
spi-cpha;
spi-cpol;
reg = <0>;
vci-supply = <&lcd_3v0_reg>;
vccio-supply = <&lcd_1v8_reg>;
reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio>;
interrupts = <5 IRQ_TYPE_EDGE_RISING>;
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};
};
...

View File

@ -176,12 +176,6 @@ DMA Fences Functions Reference
.. kernel-doc:: include/linux/dma-fence.h
:internal:
Seqno Hardware Fences
~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: include/linux/seqno-fence.h
:internal:
DMA Fence Array
~~~~~~~~~~~~~~~

View File

@ -28,56 +28,53 @@ UMA devices.
The Translation Table Manager (TTM)
===================================
TTM design background and information belongs here.
.. kernel-doc:: drivers/gpu/drm/ttm/ttm_module.c
:doc: TTM
TTM initialization
------------------
.. kernel-doc:: include/drm/ttm/ttm_caching.h
:internal:
**Warning**
This section is outdated.
TTM device object reference
---------------------------
Drivers wishing to support TTM must pass a filled :c:type:`ttm_bo_driver
<ttm_bo_driver>` structure to ttm_device_init, together with an
initialized global reference to the memory manager. The ttm_bo_driver
structure contains several fields with function pointers for
initializing the TTM, allocating and freeing memory, waiting for command
completion and fence synchronization, and memory migration.
.. kernel-doc:: include/drm/ttm/ttm_device.h
:internal:
The :c:type:`struct drm_global_reference <drm_global_reference>` is made
up of several fields:
.. kernel-doc:: drivers/gpu/drm/ttm/ttm_device.c
:export:
.. code-block:: c
TTM resource placement reference
--------------------------------
struct drm_global_reference {
enum ttm_global_types global_type;
size_t size;
void *object;
int (*init) (struct drm_global_reference *);
void (*release) (struct drm_global_reference *);
};
.. kernel-doc:: include/drm/ttm/ttm_placement.h
:internal:
TTM resource object reference
-----------------------------
There should be one global reference structure for your memory manager
as a whole, and there will be others for each object created by the
memory manager at runtime. Your global TTM should have a type of
TTM_GLOBAL_TTM_MEM. The size field for the global object should be
sizeof(struct ttm_mem_global), and the init and release hooks should
point at your driver-specific init and release routines, which probably
eventually call ttm_mem_global_init and ttm_mem_global_release,
respectively.
.. kernel-doc:: include/drm/ttm/ttm_resource.h
:internal:
Once your global TTM accounting structure is set up and initialized by
calling ttm_global_item_ref() on it, you need to create a buffer
object TTM to provide a pool for buffer object allocation by clients and
the kernel itself. The type of this object should be
TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
ttm_bo_global). Again, driver-specific init and release functions may
be provided, likely eventually calling ttm_bo_global_ref_init() and
ttm_bo_global_ref_release(), respectively. Also, like the previous
object, ttm_global_item_ref() is used to create an initial reference
count for the TTM, which will call your initialization function.
.. kernel-doc:: drivers/gpu/drm/ttm/ttm_resource.c
:export:
See the radeon_ttm.c file for an example of usage.
TTM TT object reference
-----------------------
.. kernel-doc:: include/drm/ttm/ttm_tt.h
:internal:
.. kernel-doc:: drivers/gpu/drm/ttm/ttm_tt.c
:export:
TTM page pool reference
-----------------------
.. kernel-doc:: include/drm/ttm/ttm_pool.h
:internal:
.. kernel-doc:: drivers/gpu/drm/ttm/ttm_pool.c
:export:
The Graphics Execution Manager (GEM)
====================================
@ -504,3 +501,6 @@ Scheduler Function References
.. kernel-doc:: drivers/gpu/drm/scheduler/sched_main.c
:export:
.. kernel-doc:: drivers/gpu/drm/scheduler/sched_entity.c
:export:

View File

@ -6018,6 +6018,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
DRM DRIVER FOR SAMSUNG S6D27A1 PANELS
M: Markuss Broks <markuss.broks@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
F: driver/gpu/drm/panel/panel-samsung-s6d27a1.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>

View File

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
dma-resv.o seqno-fence.o
dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o

View File

@ -82,6 +82,7 @@ static void dma_buf_release(struct dentry *dentry)
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
WARN_ON(!list_empty(&dmabuf->attachments));
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);

View File

@ -616,20 +616,17 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
* @cb: the callback to register
* @func: the function to call
*
* Add a software callback to the fence. The caller should keep a reference to
* the fence.
*
* @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
* Note that the callback can be called from an atomic context. If
* fence is already signaled, this function will return -ENOENT (and
* If fence is already signaled, this function will return -ENOENT (and
* *not* call the callback).
*
* Add a software callback to the fence. Same restrictions apply to
* refcount as it does to dma_fence_wait(), however the caller doesn't need to
* keep a refcount to fence afterward dma_fence_add_callback() has returned:
* when software access is enabled, the creator of the fence is required to keep
* the fence alive until after it signals with dma_fence_signal(). The callback
* itself can be called from irq context.
* Note that the callback can be called from an atomic context or irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
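
For reference, a minimal usage sketch of the convention documented above, assuming a driver-side wrapper; the my_waiter structure, my_fence_cb() and the deferred work item are illustrative and not part of this patch:

struct my_waiter {
    struct dma_fence_cb cb;     /* initialised by dma_fence_add_callback() */
    struct work_struct work;    /* heavy lifting deferred out of atomic context */
};

static void my_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
    struct my_waiter *w = container_of(cb, struct my_waiter, cb);

    /* may be called from atomic/irq context, so only kick off deferred work */
    schedule_work(&w->work);
}

/* ... in the submission path ... */
err = dma_fence_add_callback(fence, &w->cb, my_fence_cb);
if (err == -ENOENT)
    schedule_work(&w->work);    /* already signaled, callback will not run */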

View File

@ -48,6 +48,8 @@
* write operations) or N shared fences (read operations). The RCU
* mechanism is used to protect read access to fences from locked
* write-side updates.
*
* See struct dma_resv for more details.
*/
DEFINE_WD_CLASS(reservation_ww_class);
@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini);
* @num_fences: number of fences we want to add
*
* Should be called before dma_resv_add_shared_fence(). Must
* be called with obj->lock held.
* be called with @obj locked through dma_resv_lock().
*
* Note that the preallocated slots need to be re-reserved if @obj is unlocked
* at any time before calling dma_resv_add_shared_fence(). This is validated
* when CONFIG_DEBUG_MUTEXES is enabled.
*
* RETURNS
* Zero for success, or -errno
@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
* @obj: the reservation object
* @fence: the shared fence to add
*
* Add a fence to a shared slot, obj->lock must be held, and
* Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_shared() has been called.
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
* @fence: the shared fence to add
* @fence: the exclusive fence to add
*
* Add a fence to the exclusive slot. The obj->lock must be held.
* Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
* Note that this function replaces all fences attached to @obj, see also
* &dma_resv.fence_excl for a discussion of the semantics.
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@ -609,9 +619,11 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
* fence
*
* Callers are not required to hold specific locks, but maybe hold
* dma_resv_lock() already
* dma_resv_lock() already.
*
* RETURNS
* true if all fences signaled, else false
*
* True if all fences signaled, else false.
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{

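As a rough illustration of the locking and reservation rules documented above (a sketch only; obj, fence and the surrounding error handling are illustrative):

ret = dma_resv_lock(obj, NULL);
if (ret)
    return ret;

/* reserve a fence slot first; re-reserve if the lock is ever dropped */
ret = dma_resv_reserve_shared(obj, 1);
if (!ret)
    dma_resv_add_shared_fence(obj, fence);

dma_resv_unlock(obj);
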
View File

@ -40,11 +40,12 @@ struct dma_heap_attachment {
bool mapped;
};
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
* The selection of the orders used for allocation (1MB, 64K, 4K) is designed
* to match with the sizes often found in IOMMUs. Using order 4 pages instead

View File

@ -1,71 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* seqno-fence, using a dma-buf to synchronize fencing
*
* Copyright (C) 2012 Texas Instruments
* Copyright (C) 2012-2014 Canonical Ltd
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
*/
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/seqno-fence.h>
static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_driver_name(fence);
}
static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->get_timeline_name(fence);
}
static bool seqno_enable_signaling(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->enable_signaling(fence);
}
static bool seqno_signaled(struct dma_fence *fence)
{
struct seqno_fence *seqno_fence = to_seqno_fence(fence);
return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
}
static void seqno_release(struct dma_fence *fence)
{
struct seqno_fence *f = to_seqno_fence(fence);
dma_buf_put(f->sync_buf);
if (f->ops->release)
f->ops->release(fence);
else
dma_fence_free(&f->base);
}
static signed long seqno_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct seqno_fence *f = to_seqno_fence(fence);
return f->ops->wait(fence, intr, timeout);
}
const struct dma_fence_ops seqno_fence_ops = {
.get_driver_name = seqno_fence_get_driver_name,
.get_timeline_name = seqno_fence_get_timeline_name,
.enable_signaling = seqno_enable_signaling,
.signaled = seqno_signaled,
.wait = seqno_wait,
.release = seqno_release,
};
EXPORT_SYMBOL(seqno_fence_ops);

View File

@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
config DRM_GEM_SHMEM_HELPER
bool
depends on DRM
depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
depends on DRM
depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
help
Choose this option to get a virtual graphics memory manager,
as used by Mesa's software renderer for enhanced performance.
@ -279,7 +280,7 @@ config DRM_VGEM
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
depends on DRM && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
source "drivers/gpu/drm/zte/Kconfig"
source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"

View File

@ -113,7 +113,6 @@ obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/

View File

@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
drm_sched_job_arm(&job->base);
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
* added to BOs.
@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
drm_sched_entity_push_job(&job->base, entity);
drm_sched_entity_push_job(&job->base);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

View File

@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
int r;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
r = dma_fence_signal(fence);
if (!r)
DMA_FENCE_TRACE(fence, "signaled from irq context\n");
else
BUG();
dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
return true;
}

View File

@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
drm_sched_job_arm(&job->base);
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
drm_sched_entity_push_job(&job->base, entity);
drm_sched_entity_push_job(&job->base);
return 0;
}

View File

@ -1066,8 +1066,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
amdgpu_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@ -1148,6 +1146,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
amdgpu_ttm_backend_unbind(bdev, ttm);
if (gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);

View File

@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
return !malidp_hw_format_is_afbc_only(format);
}
if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}

View File

@ -1,21 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/device.h>

View File

@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
ret = sp_tx_aux_rd(ctx, 0xf1);
if (ret) {
sp_tx_rst_aux(ctx);
ret = sp_tx_rst_aux(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
} else {
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
return 0;
return ret;
}
static int segments_edid_read(struct anx7625_data *ctx,
@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
return 0;
return ret;
}
static int sp_tx_edid_read(struct anx7625_data *ctx,
@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
segments_edid_read(ctx, count / 2,
pblock_buf, offset);
ret = segments_edid_read(ctx, count / 2,
pblock_buf, offset);
if (ret < 0)
return ret;
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
segments_edid_read(ctx, count / 2,
pblock_buf, offset);
ret = segments_edid_read(ctx, count / 2,
pblock_buf, offset);
if (ret < 0)
return ret;
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
}
/* Reset aux channel */
sp_tx_rst_aux(ctx);
ret = sp_tx_rst_aux(ctx);
if (ret < 0) {
DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
return ret;
}
return (blocks_num + 1);
}
@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_MODE_VIDEO_HSE;
if (mipi_dsi_attach(dsi) < 0) {

View File

@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
struct cdns_dsi *dsi;
struct cdns_dsi_input *input;
struct resource *res;
int ret, irq;
u32 val;
@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
input = &dsi->input;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, res);
dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);

View File

@ -889,7 +889,7 @@ unlock:
static int it66121_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
u32 vendor_ids[2], device_ids[2], revision_id;
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
struct it66121_ctx *ctx;
@ -924,6 +924,9 @@ static int it66121_probe(struct i2c_client *client,
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
if (!ctx->next_bridge)
return -EPROBE_DEFER;
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);

View File

@ -18,16 +18,18 @@
#include <drm/drm_print.h>
#define PAGE2_GPIO_H 0xa7
#define PS_GPIO9 BIT(1)
#define PS_GPIO9 BIT(1)
#define PAGE2_I2C_BYPASS 0xea
#define I2C_BYPASS_EN 0xd0
#define I2C_BYPASS_EN 0xd0
#define PAGE2_MCS_EN 0xf3
#define MCS_EN BIT(0)
#define MCS_EN BIT(0)
#define PAGE3_SET_ADD 0xfe
#define VDO_CTL_ADD 0x13
#define VDO_DIS 0x18
#define VDO_EN 0x1c
#define DP_NUM_LANES 4
#define VDO_CTL_ADD 0x13
#define VDO_DIS 0x18
#define VDO_EN 0x1c
#define NUM_MIPI_LANES 4
/*
* PS8640 uses multiple addresses:
@ -254,7 +256,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = DP_NUM_LANES;
dsi->lanes = NUM_MIPI_LANES;
ret = mipi_dsi_attach(dsi);
if (ret)
goto err_dsi_attach;

View File

@ -28,6 +28,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@ -51,10 +52,8 @@
*
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
* of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
* panel with drm_panel_bridge_add_typed() (or the managed version
* devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
* attached to the encoder with a call to drm_bridge_attach().
* devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
* encoder with a call to drm_bridge_attach().
*
* Bridges are responsible for linking themselves with the next bridge in the
* chain, if any. This is done the same way as for encoders, with the call to
@ -1233,6 +1232,40 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
/**
* devm_drm_of_get_bridge - Return next bridge in the chain
* @dev: device to tie the bridge lifetime to
* @np: device tree node containing encoder output ports
* @port: port in the device tree node
* @endpoint: endpoint in the device tree node
*
* Given a DT node's port and endpoint number, finds the connected node
* and returns the associated bridge if any, or creates and returns a
* drm panel bridge instance if a panel is connected.
*
* Returns a pointer to the bridge if successful, or an error pointer
* otherwise.
*/
struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
struct device_node *np,
u32 port, u32 endpoint)
{
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
ret = drm_of_find_panel_or_bridge(np, port, endpoint,
&panel, &bridge);
if (ret)
return ERR_PTR(ret);
if (panel)
bridge = devm_drm_panel_bridge_add(dev, panel);
return bridge;
}
EXPORT_SYMBOL(devm_drm_of_get_bridge);
#endif
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
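
For context, a sketch of how a display driver is expected to consume the new helper (the dev/encoder variables and the port/endpoint numbers are illustrative, not taken from this patch):

struct drm_bridge *bridge;
int ret;

/* next element on port 0, endpoint 0 of this encoder's OF node */
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(bridge))
    return PTR_ERR(bridge);

/* the same call works whether a bridge or a panel sits downstream */
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
    return ret;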

View File

@ -65,6 +65,14 @@
* support can instead use e.g. drm_helper_hpd_irq_event().
*/
/*
* Global connector list for drm_connector_find_by_fwnode().
* Note drm_connector_[un]register() first take connector->lock and then
* take the connector_list_lock.
*/
static DEFINE_MUTEX(connector_list_lock);
static LIST_HEAD(connector_list);
struct drm_conn_prop_enum_list {
int type;
const char *name;
@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(connector->dev);
mutex_lock(&connector_list_lock);
list_add_tail(&connector->global_connector_list_entry, &connector_list);
mutex_unlock(&connector_list_lock);
goto unlock;
err_debugfs:
@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector)
return;
}
mutex_lock(&connector_list_lock);
list_del_init(&connector->global_connector_list_entry);
mutex_unlock(&connector_list_lock);
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@ -2543,6 +2561,67 @@ out:
return ret;
}
/**
* drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
* @fwnode: fwnode for which to find the matching drm_connector
*
* This functions looks up a drm_connector based on its associated fwnode. When
* a connector is found a reference to the connector is returned. The caller must
* call drm_connector_put() to release this reference when it is done with the
* connector.
*
* Returns: A reference to the found connector or an ERR_PTR().
*/
struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
{
struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
if (!fwnode)
return ERR_PTR(-ENODEV);
mutex_lock(&connector_list_lock);
list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
if (connector->fwnode == fwnode ||
(connector->fwnode && connector->fwnode->secondary == fwnode)) {
drm_connector_get(connector);
found = connector;
break;
}
}
mutex_unlock(&connector_list_lock);
return found;
}
/**
* drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
* @connector: connector to report the event on
*
* On some hardware a hotplug event notification may come from outside the display
* driver / device. An example of this is some USB Type-C setups where the hardware
* muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
* status bit to the GPU's DP HPD pin.
*
* This function can be used to report these out-of-band events after obtaining
* a drm_connector reference through calling drm_connector_find_by_fwnode().
*/
void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
{
struct drm_connector *connector;
connector = drm_connector_find_by_fwnode(connector_fwnode);
if (IS_ERR(connector))
return;
if (connector->funcs->oob_hotplug_event)
connector->funcs->oob_hotplug_event(connector);
drm_connector_put(connector);
}
EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
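
A hedged sketch of the intended caller, for example USB Type-C mux handling code that only knows the port's fwnode (the connector_fwnode variable is illustrative):

/* HPD arrived via the Type-C controller rather than the GPU's HPD pin */
drm_connector_oob_hotplug_event(connector_fwnode);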
/**
* DOC: Tile group

View File

@ -58,6 +58,7 @@ struct drm_property;
struct edid;
struct kref;
struct work_struct;
struct fwnode_handle;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj,
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode);
/* IOCTL */
int drm_connector_property_set_ioctl(struct drm_device *dev,

View File

@ -10,6 +10,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@ -162,6 +166,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
return PTR_ERR(pages);
}
/*
* TODO: Allocating WC pages which are correctly flushed is only
* supported on x86. Ideal solution would be a GFP_WC flag, which also
* ttm_pool.c could use.
*/
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
#endif
shmem->pages = pages;
return 0;
@ -203,6 +217,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->pages_use_count > 0)
return;
#ifdef CONFIG_X86
if (shmem->map_wc)
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
@ -542,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
} else {
page = shmem->pages[page_offset];
ret = vmf_insert_page(vma, vmf->address, page);
ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
mutex_unlock(&shmem->pages_lock);
@ -612,7 +631,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return ret;
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

View File

@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}

View File

@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
* @flags: ioctl permission flags.
* @file_priv: Pointer to struct drm_file identifying the caller.
*
* Checks whether the caller is allowed to run an ioctl with the
* indicated permissions.
*
* Returns:
* Zero if allowed, -EACCES otherwise.
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
/**
* DOC: driver specific ioctls
@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
return -ENOTTY;
if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;

View File

@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
/*
* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable
* console.
*/
if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
!IS_ENABLED(CONFIG_EXPERT))
request_module_nowait("fbcon");
return drm_dp_aux_dev_init();
}

View File

@ -231,6 +231,9 @@ EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
* return either the associated struct drm_panel or drm_bridge device. Either
* @panel or @bridge must not be NULL.
*
* This function is deprecated and should not be used in new drivers. Use
* devm_drm_of_get_bridge() instead.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_find_panel_or_bridge(const struct device_node *np,

View File

@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.width = 1280,
.height = 1920,
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@ -134,6 +140,20 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Chuwi HiBook (CWI514) */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
/* Above matches are too generic, add bios-date match */
DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Chuwi Hi10 Pro (CWI529) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@ -193,6 +213,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
}, { /* KD Kurio Smart C15200 2-in-1 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution checks makes the quirk
@ -211,10 +238,15 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330 */
}, { /* Lenovo Ideapad D330-10IGM (HD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
@ -225,6 +257,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
},
.driver_data = (void *)&lcd1280x1920_rightside_up,
}, { /* Valve Steam Deck */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),

View File

@ -10,6 +10,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
static struct device_type drm_sysfs_device_connector = {
.name = "drm_connector",
};
struct class *drm_class;
#ifdef CONFIG_ACPI
static bool drm_connector_acpi_bus_match(struct device *dev)
{
return dev->type == &drm_sysfs_device_connector;
}
static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
{
struct drm_connector *connector = to_drm_connector(dev);
return to_acpi_device_node(connector->fwnode);
}
static struct acpi_bus_type drm_connector_acpi_bus = {
.name = "drm_connector",
.match = drm_connector_acpi_bus_match,
.find_companion = drm_connector_acpi_find_companion,
};
static void drm_sysfs_acpi_register(void)
{
register_acpi_bus_type(&drm_connector_acpi_bus);
}
static void drm_sysfs_acpi_unregister(void)
{
unregister_acpi_bus_type(&drm_connector_acpi_bus);
}
#else
static void drm_sysfs_acpi_register(void) { }
static void drm_sysfs_acpi_unregister(void) { }
#endif
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@ -85,6 +123,8 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
drm_sysfs_acpi_register();
return 0;
}
@ -97,11 +137,17 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
drm_sysfs_acpi_unregister();
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
}
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
}
/*
* Connector properties
*/
@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct device *kdev;
int r;
if (connector->kdev)
return 0;
connector->kdev =
device_create_with_groups(drm_class, dev->primary->kdev, 0,
connector, connector_dev_groups,
"card%d-%s", dev->primary->index,
connector->name);
kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
if (!kdev)
return -ENOMEM;
device_initialize(kdev);
kdev->class = drm_class;
kdev->type = &drm_sysfs_device_connector;
kdev->parent = dev->primary->kdev;
kdev->groups = connector_dev_groups;
kdev->release = drm_sysfs_release;
dev_set_drvdata(kdev, connector);
r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
if (r)
goto err_free;
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
return PTR_ERR(connector->kdev);
r = device_add(kdev);
if (r) {
drm_err(dev, "failed to register connector device: %d\n", r);
goto err_free;
}
connector->kdev = kdev;
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
return 0;
err_free:
put_device(kdev);
return r;
}
void drm_sysfs_connector_remove(struct drm_connector *connector)
@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_status_event);
static void drm_sysfs_release(struct device *dev)
{
kfree(dev);
}
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;

View File

@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
if (ret)
goto out_unlock;
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
mutex_unlock(&submit->gpu->fence_lock);

View File

@ -2,7 +2,7 @@
config DRM_GUD
tristate "GUD USB Display"
depends on DRM && USB
depends on DRM && USB && MMU
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER

View File

@ -214,7 +214,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(i915_tt);
}

View File

@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
return drm_sched_job_add_implicit_dependencies(&task->base,
&bo->base.base,
write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
if (err)
return err;
err = drm_gem_fence_array_add(&submit->task->deps, fence);
err = drm_sched_job_add_dependency(&submit->task->base, fence);
if (err) {
dma_fence_put(fence);
return err;
@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
goto err_out2;
}
fence = lima_sched_context_queue_task(
submit->ctx->context + submit->pipe, submit->task);
fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)

View File

@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task,
return err;
}
drm_sched_job_arm(&task->base);
task->num_bos = num_bos;
task->vm = lima_vm_get(vm);
xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
return 0;
}
void lima_sched_task_fini(struct lima_sched_task *task)
{
struct dma_fence *fence;
unsigned long index;
int i;
drm_sched_job_cleanup(&task->base);
xa_for_each(&task->deps, index, fence) {
dma_fence_put(fence);
}
xa_destroy(&task->deps);
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
drm_gem_object_put(&task->bos[i]->base.base);
@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
drm_sched_entity_fini(&context->base);
}
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
struct lima_sched_task *task)
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
trace_lima_task_submit(task);
drm_sched_entity_push_job(&task->base, &context->base);
drm_sched_entity_push_job(&task->base);
return fence;
}
static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
struct drm_sched_entity *entity)
{
struct lima_sched_task *task = to_lima_task(job);
if (!xa_empty(&task->deps))
return xa_erase(&task->deps, task->last_dep++);
return NULL;
}
static int lima_pm_busy(struct lima_device *ldev)
{
int ret;
@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
}
static const struct drm_sched_backend_ops lima_sched_ops = {
.dependency = lima_sched_dependency,
.run_job = lima_sched_run_job,
.timedout_job = lima_sched_timedout_job,
.free_job = lima_sched_free_job,

View File

@ -23,9 +23,6 @@ struct lima_sched_task {
struct lima_vm *vm;
void *frame;
struct xarray deps;
unsigned long last_dep;
struct lima_bo **bos;
int num_bos;
@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
struct lima_sched_task *task);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);

View File

@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev)
struct drm_device *drm;
struct mcde *mcde;
struct component_match *match = NULL;
struct resource *res;
u32 pid;
int irq;
int ret;
@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev)
goto clk_disable;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcde->regs = devm_ioremap_resource(dev, res);
mcde->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcde->regs)) {
dev_err(dev, "no MCDE regs\n");
ret = -EINVAL;

View File

@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mcde_dsi *d;
struct mipi_dsi_host *host;
struct resource *res;
u32 dsi_id;
int ret;
@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev)
return PTR_ERR(d->prcmu);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
d->regs = devm_ioremap_resource(dev, res);
d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(d->regs))
return PTR_ERR(d->regs);

View File

@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->compat = match->compat;
priv->afbcd.ops = match->afbcd_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto free_drm;

View File

@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct dw_hdmi_plat_data *dw_plat_data;
struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
struct resource *res;
int irq;
int ret;
@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);

View File

@ -309,11 +309,6 @@ struct msm_gem_submit {
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
/* Array of struct dma_fence * to block on before submitting this job.
*/
struct xarray deps;
unsigned long last_dep;
/* Hw fence, which is created when the scheduler executes the job, and
* is signaled when the hw finishes (via seqno write from cmdstream)
*/

View File

@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
unsigned long index;
struct dma_fence *fence;
unsigned i;
if (submit->fence_id) {
@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
mutex_unlock(&submit->queue->lock);
}
xa_for_each (&submit->deps, index, fence) {
dma_fence_put(fence);
}
xa_destroy(&submit->deps);
dma_fence_put(submit->user_fence);
dma_fence_put(submit->hw_fence);
@ -340,11 +330,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
return ret;
}
if (no_implicit)
/* exclusive fences must be ordered */
if (no_implicit && !write)
continue;
ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
write);
ret = drm_sched_job_add_implicit_dependencies(&submit->base,
obj,
write);
if (ret)
break;
}
@ -588,7 +580,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (ret)
break;
ret = drm_gem_fence_array_add(&submit->deps, fence);
ret = drm_sched_job_add_dependency(&submit->base, fence);
if (ret)
break;
@ -798,7 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
ret = drm_gem_fence_array_add(&submit->deps, in_fence);
ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
goto out_unlock;
}
@ -878,6 +870,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
drm_sched_job_arm(&submit->base);
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
/*
@ -889,17 +883,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->fence_id < 0) {
ret = submit->fence_id = 0;
submit->fence_id = 0;
goto out;
}
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
goto out;
} else {
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
fd_install(out_fence_fd, sync_file->file);
args->fence_fd = out_fence_fd;
}
submit_attach_object_fences(submit);
@ -907,7 +900,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
drm_sched_entity_push_job(&submit->base, &queue->entity);
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;

View File

@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
struct drm_sched_entity *s_entity)
{
struct msm_gem_submit *submit = to_msm_submit(job);
if (!xa_empty(&submit->deps))
return xa_erase(&submit->deps, submit->last_dep++);
return NULL;
}
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
}
const struct drm_sched_backend_ops msm_sched_ops = {
.dependency = msm_job_dependency,
.run_job = msm_job_run,
.free_job = msm_job_free
};
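
Taken together, the conversions above (amdgpu, etnaviv, lima, msm) follow one common submission pattern; a rough sketch of the new flow, with error handling trimmed and the job/entity/obj/in_fence names illustrative, replacing the per-driver dependency xarrays and .dependency callbacks:

ret = drm_sched_job_init(&job->base, entity, owner);
if (ret)
    return ret;

/* explicit in-fences handed over by userspace */
ret = drm_sched_job_add_dependency(&job->base, in_fence);

/* implicit fences tracked in each GEM object's dma_resv */
ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, write);

drm_sched_job_arm(&job->base);
out_fence = dma_fence_get(&job->base.s_fence->finished);
drm_sched_entity_push_job(&job->base);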

View File

@ -1277,6 +1277,8 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
if (slave)
return;
nouveau_ttm_tt_unbind(bdev, ttm);
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
@ -1290,8 +1292,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
ttm_agp_unbind(ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}

View File

@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
nouveau_sgdma_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}

View File

@ -3,7 +3,6 @@ config DRM_OMAP
tristate "OMAP DRM"
depends on DRM
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
select OMAP2_DSS
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select HDMI

View File

@ -392,6 +392,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
depends on DRM_MIPI_DSI
select VIDEOMODE_HELPERS
config DRM_PANEL_SAMSUNG_S6D27A1
tristate "Samsung S6D27A1 DPI panel driver"
depends on OF && SPI && GPIOLIB
select DRM_MIPI_DBI
help
Say Y here if you want to enable support for the Samsung
S6D27A1 DPI 480x800 panel.
This panel can be found in Samsung Galaxy Ace 2
GT-I8160 mobile phone.
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF

View File

@ -39,6 +39,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o

View File

@ -60,6 +60,9 @@
#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
#define OTM8009A_HDISPLAY 480
#define OTM8009A_VDISPLAY 800
struct otm8009a {
struct device *dev;
struct drm_panel panel;
@ -70,19 +73,35 @@ struct otm8009a {
bool enabled;
};
static const struct drm_display_mode default_mode = {
.clock = 29700,
.hdisplay = 480,
.hsync_start = 480 + 98,
.hsync_end = 480 + 98 + 32,
.htotal = 480 + 98 + 32 + 98,
.vdisplay = 800,
.vsync_start = 800 + 15,
.vsync_end = 800 + 15 + 10,
.vtotal = 800 + 15 + 10 + 14,
.flags = 0,
.width_mm = 52,
.height_mm = 86,
static const struct drm_display_mode modes[] = {
{ /* 50 Hz, preferred */
.clock = 29700,
.hdisplay = 480,
.hsync_start = 480 + 98,
.hsync_end = 480 + 98 + 32,
.htotal = 480 + 98 + 32 + 98,
.vdisplay = 800,
.vsync_start = 800 + 15,
.vsync_end = 800 + 15 + 10,
.vtotal = 800 + 15 + 10 + 14,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 52,
.height_mm = 86,
},
{ /* 60 Hz */
.clock = 33000,
.hdisplay = 480,
.hsync_start = 480 + 70,
.hsync_end = 480 + 70 + 32,
.htotal = 480 + 70 + 32 + 72,
.vdisplay = 800,
.vsync_start = 800 + 15,
.vsync_end = 800 + 15 + 10,
.vtotal = 800 + 15 + 10 + 16,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 52,
.height_mm = 86,
},
};
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Default portrait 480x800 rgb24 */
dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
ret = mipi_dsi_dcs_set_column_address(dsi, 0,
default_mode.hdisplay - 1);
ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
if (ret)
return ret;
ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
if (ret)
return ret;
@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
unsigned int num_modes = ARRAY_SIZE(modes);
unsigned int i;
mode = drm_mode_duplicate(connector->dev, &default_mode);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
drm_mode_vrefresh(&default_mode));
return -ENOMEM;
for (i = 0; i < num_modes; i++) {
mode = drm_mode_duplicate(connector->dev, &modes[i]);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
modes[i].hdisplay,
modes[i].vdisplay,
drm_mode_vrefresh(&modes[i]));
return -ENOMEM;
}
mode->type = DRM_MODE_TYPE_DRIVER;
/* Setting first mode as preferred */
if (!i)
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
}
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
return 1;
return num_modes;
}
static const struct drm_panel_funcs otm8009a_drm_funcs = {


@ -0,0 +1,320 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
* Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
*/
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
#define S6D27A1_DISPCTL 0xF2 /* Display Control */
#define S6D27A1_MANPWR 0xF3 /* Manual Control */
#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
#define S6D27A1_SRCCTL 0xF6 /* Source Control */
#define S6D27A1_PANELCTL 0xF7 /* Panel Control */
static const u8 s6d27a1_dbi_read_commands[] = {
S6D27A1_READID1,
S6D27A1_READID2,
S6D27A1_READID3,
0, /* sentinel */
};
struct s6d27a1 {
struct device *dev;
struct mipi_dbi dbi;
struct drm_panel panel;
struct gpio_desc *reset;
struct regulator_bulk_data regulators[2];
};
static const struct drm_display_mode s6d27a1_480_800_mode = {
/*
* The vendor driver states that the S6D27A1 panel
* has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
*/
.clock = 24960,
.hdisplay = 480,
.hsync_start = 480 + 63,
.hsync_end = 480 + 63 + 2,
.htotal = 480 + 63 + 2 + 63,
.vdisplay = 800,
.vsync_start = 800 + 11,
.vsync_end = 800 + 11 + 2,
.vtotal = 800 + 11 + 2 + 10,
.width_mm = 50,
.height_mm = 84,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
{
return container_of(panel, struct s6d27a1, panel);
}
static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
{
struct mipi_dbi *dbi = &ctx->dbi;
u8 id1, id2, id3;
int ret;
ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 1\n");
return;
}
ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 2\n");
return;
}
ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
if (ret) {
dev_err(ctx->dev, "unable to read MTP ID 3\n");
return;
}
dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
}
static int s6d27a1_power_on(struct s6d27a1 *ctx)
{
struct mipi_dbi *dbi = &ctx->dbi;
int ret;
/* Power up */
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
ctx->regulators);
if (ret) {
dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
msleep(20);
/* Assert reset >=1 ms */
gpiod_set_value_cansleep(ctx->reset, 1);
usleep_range(1000, 5000);
/* De-assert reset */
gpiod_set_value_cansleep(ctx->reset, 0);
/* Wait >= 10 ms */
msleep(20);
/*
* Exit sleep mode and initialize display - some hammering is
* necessary.
*/
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
/* Magic to unlock level 2 control of the display */
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
/* Configure resolution to 480RGBx800 */
mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
0x44, 0x05, 0x88, 0x4B, 0x50);
mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
0x01, 0x09, 0x0D, 0x0A, 0x0E,
0x0B, 0x0F, 0x0C, 0x10, 0x01,
0x11, 0x12, 0x13, 0x14, 0x05,
0x06, 0x07, 0x08, 0x01, 0x09,
0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
0x0C, 0x10, 0x01, 0x11, 0x12,
0x13, 0x14);
/* lock the level 2 control */
mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
s6d27a1_read_mtp_id(ctx);
return 0;
}
static int s6d27a1_power_off(struct s6d27a1 *ctx)
{
/* Go into RESET and disable regulators */
gpiod_set_value_cansleep(ctx->reset, 1);
return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
ctx->regulators);
}
static int s6d27a1_unprepare(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
msleep(120);
return s6d27a1_power_off(to_s6d27a1(panel));
}
static int s6d27a1_disable(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
msleep(25);
return 0;
}
static int s6d27a1_prepare(struct drm_panel *panel)
{
return s6d27a1_power_on(to_s6d27a1(panel));
}
static int s6d27a1_enable(struct drm_panel *panel)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct mipi_dbi *dbi = &ctx->dbi;
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
return 0;
}
static int s6d27a1_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct s6d27a1 *ctx = to_s6d27a1(panel);
struct drm_display_mode *mode;
static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
if (!mode) {
dev_err(ctx->dev, "failed to add mode\n");
return -ENOMEM;
}
connector->display_info.bpc = 8;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
connector->display_info.bus_flags =
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
drm_mode_set_name(mode);
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
return 1;
}
static const struct drm_panel_funcs s6d27a1_drm_funcs = {
.disable = s6d27a1_disable,
.unprepare = s6d27a1_unprepare,
.prepare = s6d27a1_prepare,
.enable = s6d27a1_enable,
.get_modes = s6d27a1_get_modes,
};
static int s6d27a1_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct s6d27a1 *ctx;
int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
/*
* VCI is the analog voltage supply
* VCCIO is the digital I/O voltage supply
*/
ctx->regulators[0].supply = "vci";
ctx->regulators[1].supply = "vccio";
ret = devm_regulator_bulk_get(dev,
ARRAY_SIZE(ctx->regulators),
ctx->regulators);
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset)) {
ret = PTR_ERR(ctx->reset);
return dev_err_probe(dev, ret, "no RESET GPIO\n");
}
ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
DRM_MODE_CONNECTOR_DPI);
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
spi_set_drvdata(spi, ctx);
drm_panel_add(&ctx->panel);
return 0;
}
static int s6d27a1_remove(struct spi_device *spi)
{
struct s6d27a1 *ctx = spi_get_drvdata(spi);
drm_panel_remove(&ctx->panel);
return 0;
}
static const struct of_device_id s6d27a1_match[] = {
{ .compatible = "samsung,s6d27a1", },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, s6d27a1_match);
static struct spi_driver s6d27a1_driver = {
.probe = s6d27a1_probe,
.remove = s6d27a1_remove,
.driver = {
.name = "s6d27a1-panel",
.of_match_table = s6d27a1_match,
},
};
module_spi_driver(s6d27a1_driver);
MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
MODULE_LICENSE("GPL v2");


@ -3158,19 +3158,6 @@ static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
.hsync_start = 800 + 0,
.hsync_end = 800 + 1,
.htotal = 800 + 0 + 1 + 160,
.vdisplay = 480,
.vsync_start = 480 + 0,
.vsync_end = 480 + 48 + 1,
.vtotal = 480 + 48 + 1 + 0,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct drm_display_mode logicpd_type_28_mode = {
.clock = 9107,
.hdisplay = 480,
@ -3205,6 +3192,19 @@ static const struct panel_desc logicpd_type_28 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
.hsync_start = 800 + 0,
.hsync_end = 800 + 1,
.htotal = 800 + 0 + 1 + 160,
.vdisplay = 480,
.vsync_start = 480 + 0,
.vsync_end = 480 + 48 + 1,
.vtotal = 480 + 48 + 1 + 0,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,


@ -198,7 +198,6 @@ err:
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
struct resource *res;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_reset;
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;


@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
if (ret)
goto fail;
ret = drm_gem_fence_array_add(&job->deps, fence);
ret = drm_sched_job_add_dependency(&job->base, fence);
if (ret)
goto fail;
@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
int ret = 0;
int ret = 0, slot;
if (!args->jc)
return -EINVAL;
@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
goto fail_out_sync;
goto out_put_syncout;
}
kref_init(&job->refcount);
xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
job->pfdev = pfdev;
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->file_priv = file->driver_priv;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
&job->file_priv->sched_entity[slot],
NULL);
if (ret)
goto out_put_job;
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
goto fail_job;
goto out_cleanup_job;
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
goto fail_job;
goto out_cleanup_job;
ret = panfrost_job_push(job);
if (ret)
goto fail_job;
goto out_cleanup_job;
/* Update the return sync object for the job */
if (sync_out)
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
fail_job:
out_cleanup_job:
if (ret)
drm_sched_job_cleanup(&job->base);
out_put_job:
panfrost_job_put(job);
fail_out_sync:
out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);


@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
return &fence->base;
}
static int panfrost_job_get_slot(struct panfrost_job *job)
int panfrost_job_get_slot(struct panfrost_job *job)
{
/* JS0: fragment jobs.
* JS1: vertex/tiler jobs
@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
*/
affinity = pfdev->features.shader_present;
job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
static u32
@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
panfrost_job_write_affinity(pfdev, job->requirements, js);
@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int bo_count,
struct xarray *deps)
struct drm_sched_job *job)
{
int i, ret;
for (i = 0; i < bo_count; i++) {
/* panfrost always uses write mode in its current uapi */
ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
true);
if (ret)
return ret;
}
@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int panfrost_job_push(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
&acquire_ctx);
if (ret)
return ret;
mutex_lock(&pfdev->sched_lock);
ret = drm_sched_job_init(&job->base, entity, NULL);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
}
drm_sched_job_arm(&job->base);
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
&job->deps);
&job->base);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job)
kref_get(&job->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&job->base, entity);
drm_sched_entity_push_job(&job->base);
mutex_unlock(&pfdev->sched_lock);
@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
{
struct panfrost_job *job = container_of(ref, struct panfrost_job,
refcount);
struct dma_fence *fence;
unsigned long index;
unsigned int i;
xa_for_each(&job->deps, index, fence) {
dma_fence_put(fence);
}
xa_destroy(&job->deps);
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
panfrost_job_put(job);
}
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
if (!xa_empty(&job->deps))
return xa_erase(&job->deps, job->last_dep++);
return NULL;
}
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
.dependency = panfrost_job_dependency,
.run_job = panfrost_job_run,
.timedout_job = panfrost_job_timedout,
.free_job = panfrost_job_free


@ -19,10 +19,6 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_file_priv *file_priv;
/* Contains both explicit and implicit fences */
struct xarray deps;
unsigned long last_dep;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);


@ -71,8 +71,8 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
region |= region_width;
/* Lock the region that needs to be updated */
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
@ -114,14 +114,14 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
/* Need to revisit mem attrs.
* NC is the default, Mali driver is inner WT.
*/
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}


@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);


@ -36,10 +36,10 @@
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };


@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(ttm);
}


@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
dma_unmap_page(&pdev->dev, entry->busaddr[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
0, PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;


@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
int ret = dma_fence_signal_locked(&fence->base);
if (!ret)
DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
else
DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
dma_fence_signal_locked(&fence->base);
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
dma_fence_put(&fence->base);
} else
DMA_FENCE_TRACE(&fence->base, "pending\n");
}
return 0;
}
@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
dma_fence_get(f);
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return true;
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
int ret;
ret = dma_fence_signal(&fence->base);
if (!ret)
DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
dma_fence_signal(&fence->base);
return true;
}
return false;
@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
int r_sig;
/*
* This function should not be called on !radeon fences.
@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
r_sig = dma_fence_signal(&fence->base);
if (!r_sig)
DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
dma_fence_signal(&fence->base);
return r;
}


@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
{
struct radeon_ttm_tt *gtt = (void *)ttm;
radeon_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@ -574,6 +571,8 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
radeon_ttm_tt_unbind(bdev, ttm);
if (gtt && gtt->userptr) {
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct radeon_device *rdev = radeon_get_rdev(bdev);
if (rdev->flags & RADEON_IS_AGP) {
ttm_agp_unbind(ttm);
ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}


@ -9,7 +9,6 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
select DRM_RGB if ROCKCHIP_RGB
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC


@ -45,8 +45,14 @@
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
* Note: the sched_list should have at least one element to schedule
* the entity
* Note that the &sched_list must have at least one element to schedule the entity.
*
* For changing @priority later on at runtime see
* drm_sched_entity_set_priority(). For changing the set of schedulers
* @sched_list at runtime see drm_sched_entity_modify_sched().
*
* An entity is cleaned up by calling drm_sched_entity_fini(). See also
* drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
*/
@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);
* @sched_list: the list of new drm scheds which will replace
* existing entity->sched_list
* @num_sched_list: number of drm sched in sched_list
*
* Note that this must be called under the same common lock for @entity as
* drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
* guarantee through some other means that this is never called while new jobs
* can be pushed to @entity.
*/
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
*
* Returns true if the entity does not have any unscheduled jobs.
*/
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb(); /* for list_empty to work without lock */
@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
/**
* drm_sched_entity_is_ready - Check if entity is ready
*
* @entity: scheduler entity
*
* Return true if entity could provide a job.
*/
/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
/**
* drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
*
* @f: signaled fence
* @cb: our callback structure
*
* Signal the scheduler finished fence when the entity in question is killed.
*/
/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
job->sched->ops->free_job(job);
}
/**
* drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
*
* @entity: entity which is cleaned up
*
* Makes sure that all remaining jobs in an entity are killed before it is
* destroyed.
*/
static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
struct drm_sched_entity *entity)
{
if (!xa_empty(&job->dependencies))
return xa_erase(&job->dependencies, job->last_dependency++);
if (job->sched->ops->dependency)
return job->sched->ops->dependency(job, entity);
return NULL;
}
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
while ((f = job->sched->ops->dependency(job, entity)))
while ((f = drm_sched_job_dependency(job, entity)))
dma_fence_wait(f, false);
drm_sched_fence_scheduled(s_fence);
@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*
* @entity: scheduler entity
*
* This should be called after @drm_sched_entity_do_release. It goes over the
* entity and signals all jobs with an error code if the process was killed.
* Cleans up @entity which has been initialized by drm_sched_entity_init().
*
* If there are potentially jobs still in flight or getting newly queued,
* drm_sched_entity_flush() must be called first. This function then goes over
* the entity and signals all jobs with an error code if the process was killed.
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
/**
* drm_sched_entity_destroy - Destroy a context entity
*
* @entity: scheduler entity
*
* Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
* Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
* convenience wrapper.
*/
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
/*
* drm_sched_entity_clear_dep - callback to clear the entities dependency
*/
/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
/**
* drm_sched_entity_add_dependency_cb - add callback for the entities dependency
*
* @entity: entity with dependency
*
/*
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
/**
* drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
*
* @entity: entity to get the job from
*
* Process all dependencies and try to get one job from the entities queue.
*/
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job;
sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
return NULL;
while ((entity->dependency =
sched->ops->dependency(sched_job, entity))) {
drm_sched_job_dependency(sched_job, entity))) {
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
if (drm_sched_entity_add_dependency_cb(entity))
@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_put(entity->last_scheduled);
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
/*
* If the queue is empty we allow drm_sched_entity_select_rq() to
* locklessly access ->last_scheduled. This only works if we set the
* pointer before we dequeue and if we add a write barrier here.
*/
smp_wmb();
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
/**
* drm_sched_entity_select_rq - select a new rq for the entity
*
* @entity: scheduler entity
*
* Check all prerequisites and select a new rq for the entity for load
* balancing.
*/
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
/* single possible engine and already selected */
if (!entity->sched_list)
return;
fence = READ_ONCE(entity->last_scheduled);
/* queue non-empty, stay on the same engine */
if (spsc_queue_count(&entity->job_queue))
return;
/*
* Only when the queue is empty are we guaranteed that the scheduler
* thread cannot change ->last_scheduled. To enforce ordering we need
* a read barrier here. See drm_sched_entity_pop_job() for the other
* side.
*/
smp_rmb();
fence = entity->last_scheduled;
/* stay on the same engine if the previous job hasn't finished */
if (fence && !dma_fence_is_signaled(fence))
return;
@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
/**
* drm_sched_entity_push_job - Submit a job to the entity's job queue
*
* @sched_job: job to submit
* @entity: scheduler entity
*
* Note: To guarantee that the order of insertion to queue matches
* the job's fence sequence number this function should be
* called with drm_sched_job_init under common lock.
* Note: To guarantee that the order of insertion to queue matches the job's
* fence sequence number this function should be called with drm_sched_job_arm()
* under common lock for the struct drm_sched_entity that was set up for
* @sched_job in drm_sched_job_init().
*
* Returns 0 for success, negative error code otherwise.
*/
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
struct drm_sched_entity *entity = sched_job->entity;
bool first;
trace_drm_sched_job(sched_job, entity);
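
The new push/arm contract above is the key behavioural change in this file: the entity is now taken from the job itself, and drm_sched_job_arm() plus drm_sched_entity_push_job() have to run back to back under one lock so the fence sequence numbers match queue order. A minimal sketch of the calling convention (the ctx structure and its submit_lock are hypothetical, only the drm_sched_* calls are from this series):

	/* sched_job was set up earlier with drm_sched_job_init(sched_job, &ctx->entity, owner) */
	mutex_lock(&ctx->submit_lock);
	drm_sched_job_arm(sched_job);		/* initializes the scheduler fences */
	drm_sched_entity_push_job(sched_job);	/* the entity is taken from sched_job->entity */
	mutex_unlock(&ctx->submit_lock);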


@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->scheduled);
if (!ret)
DMA_FENCE_TRACE(&fence->scheduled,
"signaled from irq context\n");
else
DMA_FENCE_TRACE(&fence->scheduled,
"was already signaled\n");
dma_fence_signal(&fence->scheduled);
}
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
int ret = dma_fence_signal(&fence->finished);
if (!ret)
DMA_FENCE_TRACE(&fence->finished,
"signaled from irq context\n");
else
DMA_FENCE_TRACE(&fence->finished,
"was already signaled\n");
dma_fence_signal(&fence->finished);
}
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
/**
* drm_sched_fence_free - free up the fence memory
*
* @rcu: RCU callback head
*
* Free up the fence memory after the RCU grace period.
*/
static void drm_sched_fence_free(struct rcu_head *rcu)
static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct drm_sched_fence *fence = to_drm_sched_fence(f);
kmem_cache_free(sched_fence_slab, fence);
if (!WARN_ON_ONCE(!fence))
kmem_cache_free(sched_fence_slab, fence);
}
/**
* drm_sched_fence_free - free up an uninitialized fence
*
* @fence: fence to free
*
* Free up the fence memory. Should only be used if drm_sched_fence_init()
* has not been called yet.
*/
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
/* This function should not be called if the fence has been initialized. */
if (!WARN_ON_ONCE(fence->sched))
kmem_cache_free(sched_fence_slab, fence);
}
/**
@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
call_rcu(&fence->finished.rcu, drm_sched_fence_free);
call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
/**
@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
void *owner)
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
void *owner)
{
struct drm_sched_fence *fence = NULL;
unsigned seq;
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
return fence;
}
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity)
{
unsigned seq;
fence->sched = entity->rq->sched;
seq = atomic_inc_return(&entity->fence_seq);
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
&fence->lock, entity->fence_context + 1, seq);
return fence;
}
module_init(drm_sched_fence_slab_init);


@ -48,9 +48,11 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
/**
* drm_sched_job_init - init a scheduler job
*
* @job: scheduler job to init
* @entity: scheduler entity to use
* @owner: job owner for debugging
@ -572,43 +573,193 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
*
* Drivers must make sure to call drm_sched_job_cleanup() if this function returns
* successfully, even when @job is aborted before drm_sched_job_arm() is called.
*
* WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
* has died, which can mean that there's no valid runqueue for a @entity.
* This function returns -ENOENT in this case (which probably should be -EIO as
* a more meaningful return value).
*
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
struct drm_gpu_scheduler *sched;
drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
sched = entity->rq->sched;
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
job->s_fence = drm_sched_fence_create(entity, owner);
job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
job->id = atomic64_inc_return(&sched->job_id_count);
INIT_LIST_HEAD(&job->list);
xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
* drm_sched_job_cleanup - clean up scheduler job resources
* drm_sched_job_arm - arm a scheduler job for execution
* @job: scheduler job to arm
*
* This arms a scheduler job for execution. Specifically it initializes the
* &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
* or other places that need to track the completion of this job.
*
* Refer to drm_sched_entity_push_job() documentation for locking
* considerations.
*
* This can only be called if drm_sched_job_init() succeeded.
*/
void drm_sched_job_arm(struct drm_sched_job *job)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_entity *entity = job->entity;
BUG_ON(!entity);
sched = entity->rq->sched;
job->sched = sched;
job->s_priority = entity->rq - sched->sched_rq;
job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);
/**
* drm_sched_job_add_dependency - adds the fence as a job dependency
* @job: scheduler job to add the dependencies to
* @fence: the dma_fence to add to the list of dependencies.
*
* Note that @fence is consumed in both the success and error cases.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence)
{
struct dma_fence *entry;
unsigned long index;
u32 id = 0;
int ret;
if (!fence)
return 0;
/* Deduplicate if we already depend on a fence from the same context.
* This lets the size of the array of deps scale with the number of
* engines involved, rather than the number of BOs.
*/
xa_for_each(&job->dependencies, index, entry) {
if (entry->context != fence->context)
continue;
if (dma_fence_is_later(fence, entry)) {
dma_fence_put(entry);
xa_store(&job->dependencies, index, fence, GFP_KERNEL);
} else {
dma_fence_put(fence);
}
return 0;
}
ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
if (ret != 0)
dma_fence_put(fence);
return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
* drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
* dependencies
* @job: scheduler job to add the dependencies to
* @obj: the gem object to add new dependencies from.
* @write: whether the job might write the object (so we need to depend on
* shared fences in the reservation object).
*
* This should be called after drm_gem_lock_reservations() on your array of
* GEM objects used in the job but before updating the reservations with your
* own fences.
*
* Returns:
* 0 on success, or an error on failing to expand the array.
*/
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
struct drm_gem_object *obj,
bool write)
{
int ret;
struct dma_fence **fences;
unsigned int i, fence_count;
if (!write) {
struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
return drm_sched_job_add_dependency(job, fence);
}
ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
if (ret || !fence_count)
return ret;
for (i = 0; i < fence_count; i++) {
ret = drm_sched_job_add_dependency(job, fences[i]);
if (ret)
break;
}
for (; i < fence_count; i++)
dma_fence_put(fences[i]);
kfree(fences);
return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
/**
* drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
*
* Cleans up the resources allocated with drm_sched_job_init().
*
* Drivers should call this from their error unwind code if @job is aborted
* before drm_sched_job_arm() is called.
*
* After that point of no return @job is committed to be executed by the
* scheduler, and this function should be called from the
* &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
dma_fence_put(&job->s_fence->finished);
struct dma_fence *fence;
unsigned long index;
if (kref_read(&job->s_fence->finished.refcount)) {
/* drm_sched_job_arm() has been called */
dma_fence_put(&job->s_fence->finished);
} else {
/* aborted job before committing to run it */
drm_sched_fence_free(job->s_fence);
}
job->s_fence = NULL;
xa_for_each(&job->dependencies, index, fence) {
dma_fence_put(fence);
}
xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
@ -676,15 +827,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
/*
* Don't destroy jobs while the timeout worker is running OR thread
* is being parked and hence assumed to not touch pending_list
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
kthread_should_park())
return NULL;
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
@ -693,17 +835,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
/* cancel this job's TO timer */
cancel_delayed_work(&sched->work_tdr);
/* make the scheduled timestamp more accurate */
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
if (next)
if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
} else {
job = NULL;
/* queue timeout for next job */
drm_sched_start_timeout(sched);
}
spin_unlock(&sched->job_list_lock);
@ -791,11 +937,8 @@ static int drm_sched_main(void *param)
(entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
if (cleanup_job) {
if (cleanup_job)
sched->ops->free_job(cleanup_job);
/* queue timeout for next job */
drm_sched_start_timeout(sched);
}
if (!entity)
continue;
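
Taken together, the kerneldoc added in this file describes a four-step job lifecycle, which the panfrost conversion earlier in this pull follows. A hedged sketch of a driver submit path under the new API (every example_* name is made up; the BO reservations are assumed to be locked around the implicit-dependency step, and arm/push are assumed to run under the driver's submission lock as noted in sched_entity.c):

static int example_submit(struct example_job *ejob,
			  struct drm_sched_entity *entity,
			  struct drm_gem_object **bos, unsigned int bo_count)
{
	unsigned int i;
	int ret;

	ret = drm_sched_job_init(&ejob->base, entity, ejob);
	if (ret)
		return ret;

	/* pull the implicit dma_resv fences of each BO into the job */
	for (i = 0; i < bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&ejob->base,
							      bos[i], true);
		if (ret)
			goto out_cleanup;
	}

	/* an explicit in-fence (e.g. from a syncobj) would be added here
	 * with drm_sched_job_add_dependency(&ejob->base, in_fence) */

	drm_sched_job_arm(&ejob->base);		/* point of no return */
	drm_sched_entity_push_job(&ejob->base);
	return 0;

out_cleanup:
	/* aborted before drm_sched_job_arm(): cleanup is still required */
	drm_sched_job_cleanup(&ejob->base);
	return ret;
}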


@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
/* Specifies the constant alpha value */
val = CONSTA_MAX;
val = newstate->alpha >> 8;
reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
drm_plane_create_alpha_property(plane);
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
return plane;
@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
drm_plane_create_zpos_immutable_property(primary, 0);
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
drm_plane_create_zpos_immutable_property(overlay, i);
}
return 0;


@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
const struct sun4i_backend_quirks *quirks;
struct resource *res;
void __iomem *regs;
int i, ret;
@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(backend->frontend))
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);


@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
struct sun4i_frontend *frontend;
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct resource *res;
void __iomem *regs;
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
if (!frontend->data)
return -ENODEV;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);


@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
struct resource *res;
u32 reg;
int ret;
@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
if (!hdmi->variant)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hdmi->base = devm_ioremap_resource(dev, res);
hdmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);


@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
void __iomem *regs;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);


@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
struct resource *res;
void __iomem *regs;
int ret;
@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
tv->drv = drv;
dev_set_drvdata(dev, tv);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);


@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
struct resource *res;
void __iomem *base;
int ret;
@ -1120,8 +1119,7 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
"allwinner,sun6i-a31-mipi-dsi"))
bus_clk_name = "bus";
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Couldn't map the DSI encoder registers\n");
return PTR_ERR(base);


@ -16,8 +16,8 @@ struct sun8i_mixer;
#define CCSC10_OFFSET 0xA0000
#define CCSC11_OFFSET 0xF0000
#define SUN8I_CSC_CTRL(base) (base + 0x0)
#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
#define SUN8I_CSC_CTRL(base) ((base) + 0x0)
#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i))
#define SUN8I_CSC_CTRL_EN BIT(0)


@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
struct resource *res;
void __iomem *regs;
unsigned int base;
int plane_cnt;
@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
if (!mixer->cfg)
return -EINVAL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);

View File

@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
const struct sun8i_tcon_top_quirks *quirks;
struct resource *res;
void __iomem *regs;
int ret, i;
@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon_top->bus);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(dev, res);
regs = devm_platform_ioremap_resource(pdev, 0);
tcon_top->regs = regs;
if (IS_ERR(regs))
return PTR_ERR(regs);


@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else


@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
return true;
/* check for the sector layout bit */
if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;
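
Both Tegra hunks replace the open-coded top-byte comparison with the new core fourcc helper. For reference, the helper boils down to extracting the vendor byte of the modifier; the following is an approximate reconstruction for illustration, not a quote of the authoritative header:

#define fourcc_mod_get_vendor(modifier) \
	(((modifier) >> 56) & 0xff)

#define fourcc_mod_is_vendor(modifier, vendor) \
	(fourcc_mod_get_vendor(modifier) == DRM_FORMAT_MOD_VENDOR_## vendor)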


@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB
depends on DRM && USB && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
depends on DRM
depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help


@ -63,6 +63,7 @@ MODULE_PARM_DESC(defy, "default y resolution");
enum bochs_types {
BOCHS_QEMU_STDVGA,
BOCHS_SIMICS,
BOCHS_UNKNOWN,
};
@ -695,6 +696,13 @@ static const struct pci_device_id bochs_pci_tbl[] = {
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_UNKNOWN,
},
{
.vendor = 0x4321,
.device = 0x1111,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_SIMICS,
},
{ /* end of list */ }
};


@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
list_move_tail(&bo->lru, &bdev->pinned);
if (bdev->funcs->del_from_lru_notify)
bdev->funcs->del_from_lru_notify(bo);
}
static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
ttm_bo_del_from_lru(bo);
ttm_bo_move_to_pinned(bo);
return;
}
@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
ttm_bo_del_from_lru(bo);
ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@ -914,57 +924,11 @@ out:
return ret;
}
static bool ttm_bo_places_compat(const struct ttm_place *places,
unsigned num_placement,
struct ttm_resource *mem,
uint32_t *new_flags)
{
unsigned i;
if (mem->placement & TTM_PL_FLAG_TEMPORARY)
return false;
for (i = 0; i < num_placement; i++) {
const struct ttm_place *heap = &places[i];
if ((mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((mem->mem_type == heap->mem_type) &&
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
return false;
}
bool ttm_bo_mem_compat(struct ttm_placement *placement,
struct ttm_resource *mem,
uint32_t *new_flags)
{
if (ttm_bo_places_compat(placement->placement, placement->num_placement,
mem, new_flags))
return true;
if ((placement->busy_placement != placement->placement ||
placement->num_busy_placement > placement->num_placement) &&
ttm_bo_places_compat(placement->busy_placement,
placement->num_busy_placement,
mem, new_flags))
return true;
return false;
}
EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
int ret;
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
if (!ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
ttm_bo_del_from_lru(bo);
ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}


@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
{
struct ttm_resource_manager *man;
struct ttm_buffer_object *bo;
unsigned int i, j;
spin_lock(&bdev->lru_lock);
while (!list_empty(&bdev->pinned)) {
bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
/* Take ref against racing releases once lru_lock is unlocked */
if (ttm_bo_get_unless_zero(bo)) {
list_del_init(&bo->lru);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_bo_put(bo);
spin_lock(&bdev->lru_lock);
}
}
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
while (!list_empty(&man->lru[j])) {
bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
if (ttm_bo_get_unless_zero(bo)) {
list_del_init(&bo->lru);
spin_unlock(&bdev->lru_lock);
if (bo->ttm)
ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_bo_put(bo);
spin_lock(&bdev->lru_lock);
}
}
}
}
spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
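
ttm_device_clear_dma_mappings() is the new helper for clearing all DMA mappings: it walks the pinned list and the per-manager LRUs and unpopulates every TT so that no mapping outlives the device. A driver teardown path would plausibly use it right before ttm_device_fini(); a sketch, assuming a hypothetical example_device wrapper and hardware that is already quiesced:

static void example_device_fini(struct example_device *edev)
{
	/* no more GPU access can happen at this point */
	ttm_device_clear_dma_mappings(&edev->bdev);
	ttm_device_fini(&edev->bdev);
}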


@ -39,6 +39,18 @@
#include "ttm_module.h"
/**
* DOC: TTM
*
* TTM is a memory manager for accelerator devices with dedicated memory.
*
* The basic idea is that resources are grouped together in buffer objects of
* certain size and TTM handles lifetime, movement and CPU mappings of those
* objects.
*
* TODO: Add more design background and information here.
*/
/**
* ttm_prot_from_caching - Modify the page protection according to the
* ttm caching mode


@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
static struct mutex shrinker_lock;
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
mutex_lock(&shrinker_lock);
spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
mutex_unlock(&shrinker_lock);
spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p;
mutex_lock(&shrinker_lock);
spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
mutex_unlock(&shrinker_lock);
spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
unsigned int num_freed;
unsigned int num_pages;
struct page *p;
mutex_lock(&shrinker_lock);
spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
list_move_tail(&pt->shrinker_list, &shrinker_list);
spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
num_freed = 1 << pt->order;
num_pages = 1 << pt->order;
} else {
num_freed = 0;
num_pages = 0;
}
list_move_tail(&pt->shrinker_list, &shrinker_list);
mutex_unlock(&shrinker_lock);
return num_freed;
return num_pages;
}
/* Return the allocation order based for a page */
@ -531,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
/* We removed the pool types from the LRU, but we need to also make sure
* that no shrinker is concurrently freeing pages from the pool.
*/
synchronize_shrinkers();
}
/* As long as pages are available make sure to release at least one */
@ -605,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
mutex_lock(&shrinker_lock);
spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
@ -614,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
mutex_unlock(&shrinker_lock);
spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
@ -641,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
ttm_pool_debugfs_header(m);
mutex_lock(&shrinker_lock);
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@ -657,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
mutex_unlock(&shrinker_lock);
spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
@ -694,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
mutex_init(&shrinker_lock);
spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {


@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
int ttm_range_man_init(struct ttm_device *bdev,
int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
ttm_resource_manager_set_used(man, true);
return 0;
}
EXPORT_SYMBOL(ttm_range_man_init);
EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
* ttm_range_man_fini
@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
*
* Remove the generic range manager from a slot and tear it down.
*/
int ttm_range_man_fini(struct ttm_device *bdev,
int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
kfree(rman);
return 0;
}
EXPORT_SYMBOL(ttm_range_man_fini);
EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
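The range manager entry points gain a _nocheck suffix; presumably the header keeps ttm_range_man_init()/ttm_range_man_fini() as inline wrappers that add a compile-time check on the memory type before calling the renamed functions. The sketch below shows what such a wrapper could look like; it is an assumption based on the rename, not a copy of the header, and relies on <linux/build_bug.h> plus the TTM headers.

/* Assumed shape of the checked wrapper: refuse obviously out-of-range,
 * compile-time-constant memory types, then call the _nocheck variant.
 */
static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
					      unsigned int type, bool use_tt,
					      unsigned long p_size)
{
	BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
	return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
}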


@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
static bool ttm_resource_places_compat(struct ttm_resource *res,
const struct ttm_place *places,
unsigned num_placement)
{
unsigned i;
if (res->placement & TTM_PL_FLAG_TEMPORARY)
return false;
for (i = 0; i < num_placement; i++) {
const struct ttm_place *heap = &places[i];
if (res->start < heap->fpfn || (heap->lpfn &&
(res->start + res->num_pages) > heap->lpfn))
continue;
if ((res->mem_type == heap->mem_type) &&
(!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
(res->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
return false;
}
/**
* ttm_resource_compat - check if resource is compatible with placement
*
* @res: the resource to check
* @placement: the placement to check against
*
* Returns true if the placement is compatible.
*/
bool ttm_resource_compat(struct ttm_resource *res,
struct ttm_placement *placement)
{
if (ttm_resource_places_compat(res, placement->placement,
placement->num_placement))
return true;
if ((placement->busy_placement != placement->placement ||
placement->num_busy_placement > placement->num_placement) &&
ttm_resource_places_compat(res, placement->busy_placement,
placement->num_busy_placement))
return true;
return false;
}
EXPORT_SYMBOL(ttm_resource_compat);
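ttm_resource_compat() lets a driver test an existing resource directly against a placement instead of open-coding the check. A short, hedged sketch of the kind of caller this enables follows; the helper name and surrounding code are illustrative only and assume the usual TTM headers plus a ttm_buffer_object whose current resource is bo->resource.

/* Illustrative driver-side check: skip the potentially expensive
 * validation when the current resource already satisfies the placement.
 */
static int mydrv_validate_bo(struct ttm_buffer_object *bo,
			     struct ttm_placement *placement,
			     struct ttm_operation_ctx *ctx)
{
	if (bo->resource && ttm_resource_compat(bo->resource, placement))
		return 0;

	return ttm_bo_validate(bo, placement, ctx);
}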
/**
* ttm_resource_manager_init
*


@ -122,17 +122,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_unpopulate(bdev, ttm);
if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
}
EXPORT_SYMBOL(ttm_tt_destroy_common);
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@ -167,6 +156,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
WARN_ON(ttm->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED);
if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
if (ttm->pages)
kvfree(ttm->pages);
else


@ -4,6 +4,7 @@ config DRM_UDL
depends on DRM
depends on USB
depends on USB_ARCH_HAS_HCD
depends on MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_V3D
tristate "Broadcom V3D 3.x and newer"
depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
depends on DRM
depends on COMMON_CLK
depends on MMU


@ -234,11 +234,6 @@ struct v3d_job {
struct drm_gem_object **bo;
u32 bo_count;
/* Array of struct dma_fence * to block on before submitting this job.
*/
struct xarray deps;
unsigned long last_dep;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *irq_fence;
@ -379,6 +374,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);


@ -197,8 +197,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
V3D_L2TCACTL_L2TFLS), 100)) {
DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
V3D_L2TCACTL_TMUWCF), 100)) {
DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
ret = drm_gem_fence_array_add_implicit(&job->deps,
job->bo[i], true);
ret = drm_sched_job_add_implicit_dependencies(&job->base,
job->bo[i], true);
if (ret) {
drm_gem_unlock_reservations(job->bo, job->bo_count,
acquire_ctx);
@ -356,8 +356,6 @@ static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
unsigned long index;
struct dma_fence *fence;
int i;
for (i = 0; i < job->bo_count; i++) {
@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
}
kvfree(job->bo);
xa_for_each(&job->deps, index, fence) {
dma_fence_put(fence);
}
xa_destroy(&job->deps);
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@ -397,6 +390,12 @@ v3d_render_job_free(struct kref *ref)
v3d_job_free(ref);
}
void v3d_job_cleanup(struct v3d_job *job)
{
drm_sched_job_cleanup(&job->base);
v3d_job_put(job);
}
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
@ -438,9 +437,10 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
struct v3d_job *job, void (*free)(struct kref *ref),
u32 in_sync)
u32 in_sync, enum v3d_queue queue)
{
struct dma_fence *in_fence = NULL;
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
int ret;
job->v3d = v3d;
@ -450,44 +450,40 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
if (ret < 0)
return ret;
xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
goto fail;
ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
if (ret == -EINVAL)
goto fail;
goto fail_job;
ret = drm_gem_fence_array_add(&job->deps, in_fence);
ret = drm_sched_job_add_dependency(&job->base, in_fence);
if (ret)
goto fail;
goto fail_job;
kref_init(&job->refcount);
return 0;
fail_job:
drm_sched_job_cleanup(&job->base);
fail:
xa_destroy(&job->deps);
pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
struct v3d_job *job, enum v3d_queue queue)
static void
v3d_push_job(struct v3d_job *job)
{
int ret;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
v3d_priv);
if (ret)
return ret;
drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
/* put by scheduler job completion */
kref_get(&job->refcount);
drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
return 0;
drm_sched_entity_push_job(&job->base);
}
static void
@ -562,7 +558,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&render->unref_list);
ret = v3d_job_init(v3d, file_priv, &render->base,
v3d_render_job_free, args->in_sync_rcl);
v3d_render_job_free, args->in_sync_rcl, V3D_RENDER);
if (ret) {
kfree(render);
return ret;
@ -576,7 +572,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = v3d_job_init(v3d, file_priv, &bin->base,
v3d_job_free, args->in_sync_bcl);
v3d_job_free, args->in_sync_bcl, V3D_BIN);
if (ret) {
v3d_job_put(&render->base);
kfree(bin);
@ -598,7 +594,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
goto fail;
}
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
kfree(clean_job);
clean_job = NULL;
@ -633,31 +629,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
if (ret)
goto fail_unreserve;
v3d_push_job(&bin->base);
ret = drm_gem_fence_array_add(&render->base.deps,
dma_fence_get(bin->base.done_fence));
ret = drm_sched_job_add_dependency(&render->base.base,
dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
if (ret)
goto fail_unreserve;
v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
ret = drm_sched_job_add_dependency(&clean_job->base,
render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
if (ret)
goto fail_unreserve;
v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
@ -682,10 +673,10 @@ fail_unreserve:
last_job->bo_count, &acquire_ctx);
fail:
if (bin)
v3d_job_put(&bin->base);
v3d_job_put(&render->base);
v3d_job_cleanup(&bin->base);
v3d_job_cleanup(&render->base);
if (clean_job)
v3d_job_put(clean_job);
v3d_job_cleanup(clean_job);
return ret;
}
@ -704,7 +695,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_tfu *args = data;
struct v3d_tfu_job *job;
struct ww_acquire_ctx acquire_ctx;
@ -717,7 +707,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync);
v3d_job_free, args->in_sync, V3D_TFU);
if (ret) {
kfree(job);
return ret;
@ -761,9 +751,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
goto fail;
mutex_lock(&v3d->sched_lock);
ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
if (ret)
goto fail_unreserve;
v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@ -775,12 +763,8 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
return 0;
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
&acquire_ctx);
fail:
v3d_job_put(&job->base);
v3d_job_cleanup(&job->base);
return ret;
}
@ -818,7 +802,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = v3d_job_init(v3d, file_priv, &job->base,
v3d_job_free, args->in_sync);
v3d_job_free, args->in_sync, V3D_CSD);
if (ret) {
kfree(job);
return ret;
@ -831,7 +815,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0, V3D_CACHE_CLEAN);
if (ret) {
v3d_job_put(&job->base);
kfree(clean_job);
@ -859,18 +843,14 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&v3d->sched_lock);
ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
v3d_push_job(&job->base);
ret = drm_sched_job_add_dependency(&clean_job->base,
dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
ret = drm_gem_fence_array_add(&clean_job->deps,
dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
if (ret)
goto fail_unreserve;
v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
@ -889,8 +869,8 @@ fail_unreserve:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
v3d_job_put(&job->base);
v3d_job_put(clean_job);
v3d_job_cleanup(&job->base);
v3d_job_cleanup(clean_job);
return ret;
}
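With the per-job xarray of dependency fences removed, submission follows the scheduler's generic flow: initialize the job against an entity, hand explicit and implicit fences to the scheduler, then arm and push. The sketch below condenses that flow into one hypothetical helper (error paths trimmed, job refcounting omitted); it only reuses calls visible in this diff, and the scheduler takes over the reference passed to drm_sched_job_add_dependency().

/* Condensed sketch of scheduler-managed dependencies (hypothetical
 * helper; the real code splits this across v3d_job_init()/v3d_push_job()).
 */
static int mydrv_submit(struct v3d_job *job, struct drm_sched_entity *entity,
			struct dma_fence *in_fence, struct drm_gem_object *bo)
{
	int ret;

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret)
		return ret;

	/* The scheduler now tracks the explicit in-fence and the BO's
	 * implicit fences; callers normally hold the BO reservations
	 * while adding implicit dependencies.
	 */
	ret = drm_sched_job_add_dependency(&job->base, in_fence);
	if (!ret)
		ret = drm_sched_job_add_implicit_dependencies(&job->base, bo, true);
	if (ret) {
		drm_sched_job_cleanup(&job->base);
		return ret;
	}

	drm_sched_job_arm(&job->base);
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
	drm_sched_entity_push_job(&job->base);
	return 0;
}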


@ -13,7 +13,7 @@
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
* v3d_job_dependency() to manage the dependency between bin and
* drm_sched_job_add_dependency() to manage the dependency between bin and
* render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
}
static void
v3d_job_free(struct drm_sched_job *sched_job)
v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
drm_sched_job_cleanup(sched_job);
v3d_job_put(job);
v3d_job_cleanup(job);
}
static void
@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
/*
* Returns the fences that the job depends on, one by one.
*
* If placed in the scheduler's .dependency method, the corresponding
* .run_job won't be called until all of them have been signaled.
*/
static struct dma_fence *
v3d_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
struct v3d_job *job = to_v3d_job(sched_job);
/* XXX: Wait on a fence for switching the GMP if necessary,
* and then do so.
*/
if (!xa_empty(&job->deps))
return xa_erase(&job->deps, job->last_dep++);
return NULL;
}
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_job_free,
.free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
.free_job = v3d_job_free
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
.free_job = v3d_job_free
.free_job = v3d_sched_job_free
};
int


@ -229,26 +229,19 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &bridge);
if (ret) {
bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
*/
if (ret == -ENODEV)
if (PTR_ERR(bridge) == -ENODEV)
return 0;
else
return ret;
return PTR_ERR(bridge);
}
if (panel)
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}


@ -50,13 +50,11 @@
#define DRIVER_PATCHLEVEL 0
/* Helper function for mapping the regs on a platform device. */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
{
struct resource *res;
void __iomem *map;
res = platform_get_resource(dev, IORESOURCE_MEM, index);
map = devm_ioremap_resource(&dev->dev, res);
map = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(map))
return map;


@ -1497,7 +1497,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
@ -1609,27 +1608,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
&panel, &dsi->bridge);
if (ret) {
/* If the bridge or panel pointed by dev->of_node is not
* enabled, just return 0 here so that we don't prevent the DRM
* dev from being registered. Of course that means the DSI
* encoder won't be exposed, but that's not a problem since
* nothing is connected to it.
*/
if (ret == -ENODEV)
return 0;
return ret;
}
if (panel) {
dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
DRM_MODE_CONNECTOR_DSI);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
}
dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(dsi->bridge))
return PTR_ERR(dsi->bridge);
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@ -1667,8 +1648,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
if (dsi->bridge)
pm_runtime_disable(dev);
pm_runtime_disable(dev);
/*
* Restore the bridge_chain so the bridge detach procedure can happen


@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
@ -50,87 +51,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
static const struct drm_gem_object_funcs vgem_gem_object_funcs;
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
static void vgem_gem_free_object(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
kvfree(vgem_obj->pages);
mutex_destroy(&vgem_obj->pages_lock);
if (obj->import_attach)
drm_prime_gem_destroy(obj, vgem_obj->table);
drm_gem_object_release(obj);
kfree(vgem_obj);
}
static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_vgem_gem_object *obj = vma->vm_private_data;
/* We don't use vmf->pgoff since that has the fake offset */
unsigned long vaddr = vmf->address;
vm_fault_t ret = VM_FAULT_SIGBUS;
loff_t num_pages;
pgoff_t page_offset;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
if (page_offset >= num_pages)
return VM_FAULT_SIGBUS;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
vmf->page = obj->pages[page_offset];
ret = 0;
}
mutex_unlock(&obj->pages_lock);
if (ret) {
struct page *page;
page = shmem_read_mapping_page(
file_inode(obj->base.filp)->i_mapping,
page_offset);
if (!IS_ERR(page)) {
vmf->page = page;
ret = 0;
} else switch (PTR_ERR(page)) {
case -ENOSPC:
case -ENOMEM:
ret = VM_FAULT_OOM;
break;
case -EBUSY:
ret = VM_FAULT_RETRY;
break;
case -EFAULT:
case -EINVAL:
ret = VM_FAULT_SIGBUS;
break;
default:
WARN_ON(PTR_ERR(page));
ret = VM_FAULT_SIGBUS;
break;
}
}
return ret;
}
static const struct vm_operations_struct vgem_gem_vm_ops = {
.fault = vgem_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
kfree(vfile);
}
static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
int ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return ERR_PTR(-ENOMEM);
obj->base.funcs = &vgem_gem_object_funcs;
ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
if (ret) {
kfree(obj);
return ERR_PTR(ret);
}
mutex_init(&obj->pages_lock);
return obj;
}
static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
{
drm_gem_object_release(&obj->base);
kfree(obj);
}
static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
struct drm_file *file,
unsigned int *handle,
unsigned long size)
{
struct drm_vgem_gem_object *obj;
int ret;
obj = __vgem_gem_create(dev, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
if (ret) {
drm_gem_object_put(&obj->base);
return ERR_PTR(ret);
}
return &obj->base;
}
static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct drm_gem_object *gem_object;
u64 pitch, size;
pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
size = args->height * pitch;
if (size == 0)
return -EINVAL;
gem_object = vgem_gem_create(dev, file, &args->handle, size);
if (IS_ERR(gem_object))
return PTR_ERR(gem_object);
args->size = gem_object->size;
args->pitch = pitch;
drm_gem_object_put(gem_object);
DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
unsigned long flags = vma->vm_flags;
int ret;
struct drm_gem_shmem_object *obj;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
/* Keep the WC mmaping set by drm_gem_mmap() but our pages
* are ordinary and not special.
/*
* vgem doesn't have any begin/end cpu access ioctls, therefore must use
* coherent memory or dma-buf sharing just wont work.
*/
vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
}
obj->map_wc = true;
static const struct file_operations vgem_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.mmap = vgem_mmap,
.poll = drm_poll,
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.release = drm_release,
};
static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
{
mutex_lock(&bo->pages_lock);
if (bo->pages_pin_count++ == 0) {
struct page **pages;
pages = drm_gem_get_pages(&bo->base);
if (IS_ERR(pages)) {
bo->pages_pin_count--;
mutex_unlock(&bo->pages_lock);
return pages;
}
bo->pages = pages;
}
mutex_unlock(&bo->pages_lock);
return bo->pages;
}
static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
{
mutex_lock(&bo->pages_lock);
if (--bo->pages_pin_count == 0) {
drm_gem_put_pages(&bo->base, bo->pages, true, true);
bo->pages = NULL;
}
mutex_unlock(&bo->pages_lock);
}
static int vgem_prime_pin(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
/* Flush the object from the CPU cache so that importers can rely
* on coherent indirect access via the exported dma-address.
*/
drm_clflush_pages(pages, n_pages);
return 0;
}
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
vgem_unpin_pages(bo);
}
static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
}
static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
}
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
struct drm_vgem_gem_object *obj;
int npages;
obj = __vgem_gem_create(dev, attach->dmabuf->size);
if (IS_ERR(obj))
return ERR_CAST(obj);
npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
obj->table = sg;
obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!obj->pages) {
__vgem_gem_destroy(obj);
return ERR_PTR(-ENOMEM);
}
obj->pages_pin_count++; /* perma-pinned */
drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >> PAGE_SHIFT;
struct page **pages;
void *vaddr;
pages = vgem_pin_pages(bo);
if (IS_ERR(pages))
return PTR_ERR(pages);
vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
if (!vaddr)
return -ENOMEM;
dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
vunmap(map->vaddr);
vgem_unpin_pages(bo);
}
static int vgem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
int ret;
if (obj->size < vma->vm_end - vma->vm_start)
return -EINVAL;
if (!obj->filp)
return -ENODEV;
ret = call_mmap(obj->filp, vma);
if (ret)
return ret;
vma_set_file(vma, obj->filp);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
return 0;
}
static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
.free = vgem_gem_free_object,
.pin = vgem_prime_pin,
.unpin = vgem_prime_unpin,
.get_sg_table = vgem_prime_get_sg_table,
.vmap = vgem_prime_vmap,
.vunmap = vgem_prime_vunmap,
.vm_ops = &vgem_gem_vm_ops,
};
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
.dumb_create = vgem_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = vgem_prime_import,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
.gem_prime_mmap = vgem_prime_mmap,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,


@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@ -459,4 +460,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
struct device *dev,
enum dma_data_direction dir);
void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
struct sg_table *sgt,
enum dma_data_direction dir);
#endif

Some files were not shown because too many files have changed in this diff.