Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next
The conflict in intel_drv.h tripped me up a bit since a patch in dinq moves all the functions around, but another one in drm-next removes a single function. So I've figured backing this into a backmerge would be good. i915_dma.c is just adjacent lines changed, nothing nefarious there.

Conflicts:
	drivers/gpu/drm/i915/i915_dma.c
	drivers/gpu/drm/i915/intel_drv.h

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 967ad7f148
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
 .minor = DRIVER_MINOR,
 .patchlevel = DRIVER_PATCHLEVEL,
 
-.gem_init_object = ast_gem_init_object,
 .gem_free_object = ast_gem_free_object,
 .dumb_create = ast_dumb_create,
 .dumb_map_offset = ast_dumb_mmap_offset,
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
 struct drm_device *dev,
 struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
 struct drm_device *dev,
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
 return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-BUG();
-return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
 struct ttm_buffer_object *tbo;
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
 .major = DRIVER_MAJOR,
 .minor = DRIVER_MINOR,
 .patchlevel = DRIVER_PATCHLEVEL,
-.gem_init_object = cirrus_gem_init_object,
 .gem_free_object = cirrus_gem_free_object,
 .dumb_create = cirrus_dumb_create,
 .dumb_map_offset = cirrus_dumb_mmap_offset,
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
 struct pci_dev *pdev,
 uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
 struct drm_device *dev,
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
 return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-BUG();
-return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
 struct ttm_buffer_object *tbo;
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
 mutex_lock(&dev->ctxlist_mutex);
 list_add(&ctx_entry->head, &dev->ctxlist);
-++dev->ctx_count;
 mutex_unlock(&dev->ctxlist_mutex);
 
 return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
 if (pos->handle == ctx->handle) {
 list_del(&pos->head);
 kfree(pos);
---dev->ctx_count;
 }
 }
 }
@@ -109,9 +109,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 * then culled (based on validity and the @maxX, @maxY parameters) and put into
 * the normal modes list.
 *
-* Intended to be use as a generic implementation of the ->probe() @connector
-* callback for drivers that use the crtc helpers for output mode filtering and
-* detection.
+* Intended to be use as a generic implementation of the ->fill_modes()
+* @connector vfunc for drivers that use the crtc helpers for output mode
+* filtering and detection.
 *
 * RETURNS:
 * Number of modes found on @connector.
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
 return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
 int lane)
 {
 int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
 return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 int lane_count)
 {
 u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
 int lane_count)
 {
 int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
 int lane)
 {
 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
 int lane)
 {
 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 udelay(100);
 else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
 udelay(400);
 else
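Note (not part of the commit): the hunks above only add const to the DP link-status helpers, so existing callers keep working unchanged. A minimal caller-side sketch, assuming only the signatures shown above and that the driver has already read link_status back from the sink (that read path is driver-specific and not shown):

#include <drm/drm_dp_helper.h>

/* Sketch only: link is usable once both training phases report success. */
static bool example_channel_trained(const u8 link_status[DP_LINK_STATUS_SIZE],
				    int lane_count)
{
	return drm_dp_clock_recovery_ok(link_status, lane_count) &&
	       drm_dp_channel_eq_ok(link_status, lane_count);
}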
@ -171,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
|
||||
|
||||
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
|
||||
|
||||
/**
|
||||
* drm_legacy_dev_reinit
|
||||
*
|
||||
* Reinitializes a legacy/ums drm device in it's lastclose function.
|
||||
*/
|
||||
static void drm_legacy_dev_reinit(struct drm_device *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
return;
|
||||
|
||||
atomic_set(&dev->ioctl_count, 0);
|
||||
atomic_set(&dev->vma_count, 0);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
|
||||
atomic_set(&dev->counts[i], 0);
|
||||
|
||||
dev->sigdata.lock = NULL;
|
||||
|
||||
dev->context_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->if_version = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Take down the DRM device.
|
||||
*
|
||||
* \param dev DRM device structure.
|
||||
*
|
||||
* Frees every resource in \p dev.
|
||||
*
|
||||
* \sa drm_device
|
||||
*/
|
||||
int drm_lastclose(struct drm_device * dev)
|
||||
{
|
||||
struct drm_vma_entry *vma, *vma_temp;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (dev->driver->lastclose)
|
||||
dev->driver->lastclose(dev);
|
||||
DRM_DEBUG("driver lastclose completed\n");
|
||||
|
||||
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
drm_agp_clear(dev);
|
||||
|
||||
drm_legacy_sg_cleanup(dev);
|
||||
|
||||
/* Clear vma list (only built for debugging) */
|
||||
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
|
||||
list_del(&vma->head);
|
||||
kfree(vma);
|
||||
}
|
||||
|
||||
drm_legacy_dma_takedown(dev);
|
||||
|
||||
dev->dev_mapping = NULL;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
drm_legacy_dev_reinit(dev);
|
||||
|
||||
DRM_DEBUG("lastclose completed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** File operations structure */
|
||||
static const struct file_operations drm_stub_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
@ -386,7 +316,6 @@ long drm_ioctl(struct file *filp,
|
||||
return -ENODEV;
|
||||
|
||||
atomic_inc(&dev->ioctl_count);
|
||||
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
|
||||
++file_priv->ioctl_count;
|
||||
|
||||
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
|
||||
|
@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_get_edid);
|
||||
|
||||
/**
|
||||
* drm_edid_duplicate - duplicate an EDID and the extensions
|
||||
* @edid: EDID to duplicate
|
||||
*
|
||||
* Return duplicate edid or NULL on allocation failure.
|
||||
*/
|
||||
struct edid *drm_edid_duplicate(const struct edid *edid)
|
||||
{
|
||||
return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_edid_duplicate);
|
||||
|
||||
/*** EDID parsing ***/
|
||||
|
||||
/**
|
||||
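Note (not part of the commit): drm_edid_duplicate(), added above, replaces open-coded kmemdup() of a base EDID block plus its extensions; the exynos/vidi hunk later in this merge is converted to it. A minimal sketch of a caller, where raw_edid stands in for whatever cached EDID a driver holds:

#include <linux/err.h>

/* Sketch only: duplicate a cached EDID, as vidi_get_edid() now does. */
static struct edid *example_dup_edid(const struct edid *raw_edid)
{
	struct edid *copy = drm_edid_duplicate(raw_edid);

	if (!copy)
		return ERR_PTR(-ENOMEM);
	return copy;
}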
@ -3013,6 +3025,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
|
||||
/* Speaker Allocation Data Block */
|
||||
if (dbl == 3) {
|
||||
*sadb = kmalloc(dbl, GFP_KERNEL);
|
||||
if (!*sadb)
|
||||
return -ENOMEM;
|
||||
memcpy(*sadb, &db[1], dbl);
|
||||
count = dbl;
|
||||
break;
|
||||
|
@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
|
||||
"from built-in data or /lib/firmware instead. ");
|
||||
|
||||
#define GENERIC_EDIDS 5
|
||||
static char *generic_edid_name[GENERIC_EDIDS] = {
|
||||
static const char *generic_edid_name[GENERIC_EDIDS] = {
|
||||
"edid/1024x768.bin",
|
||||
"edid/1280x1024.bin",
|
||||
"edid/1600x1200.bin",
|
||||
@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
|
||||
"edid/1920x1080.bin",
|
||||
};
|
||||
|
||||
static u8 generic_edid[GENERIC_EDIDS][128] = {
|
||||
static const u8 generic_edid[GENERIC_EDIDS][128] = {
|
||||
{
|
||||
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
|
||||
0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
|
||||
},
|
||||
};
|
||||
|
||||
static int edid_size(const u8 *edid, int data_size)
|
||||
{
|
||||
if (data_size < EDID_LENGTH)
|
||||
return 0;
|
||||
|
||||
return (edid[0x7e] + 1) * EDID_LENGTH;
|
||||
}
|
||||
|
||||
static u8 *edid_load(struct drm_connector *connector, const char *name,
|
||||
const char *connector_name)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
struct platform_device *pdev;
|
||||
u8 *fwdata = NULL, *edid, *new_edid;
|
||||
int fwsize, expected;
|
||||
int builtin = 0, err = 0;
|
||||
const struct firmware *fw = NULL;
|
||||
const u8 *fwdata;
|
||||
u8 *edid;
|
||||
int fwsize, builtin;
|
||||
int i, valid_extensions = 0;
|
||||
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
|
||||
|
||||
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
|
||||
if (IS_ERR(pdev)) {
|
||||
DRM_ERROR("Failed to register EDID firmware platform device "
|
||||
"for connector \"%s\"\n", connector_name);
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = request_firmware(&fw, name, &pdev->dev);
|
||||
platform_device_unregister(pdev);
|
||||
|
||||
if (err) {
|
||||
i = 0;
|
||||
while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
|
||||
i++;
|
||||
if (i < GENERIC_EDIDS) {
|
||||
err = 0;
|
||||
builtin = 1;
|
||||
builtin = 0;
|
||||
for (i = 0; i < GENERIC_EDIDS; i++) {
|
||||
if (strcmp(name, generic_edid_name[i]) == 0) {
|
||||
fwdata = generic_edid[i];
|
||||
fwsize = sizeof(generic_edid[i]);
|
||||
builtin = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!builtin) {
|
||||
struct platform_device *pdev;
|
||||
int err;
|
||||
|
||||
if (err) {
|
||||
DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
|
||||
name, err);
|
||||
goto out;
|
||||
}
|
||||
pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
|
||||
if (IS_ERR(pdev)) {
|
||||
DRM_ERROR("Failed to register EDID firmware platform device "
|
||||
"for connector \"%s\"\n", connector_name);
|
||||
return ERR_CAST(pdev);
|
||||
}
|
||||
|
||||
if (fwdata == NULL) {
|
||||
fwdata = (u8 *) fw->data;
|
||||
err = request_firmware(&fw, name, &pdev->dev);
|
||||
platform_device_unregister(pdev);
|
||||
if (err) {
|
||||
DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
|
||||
name, err);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
fwdata = fw->data;
|
||||
fwsize = fw->size;
|
||||
}
|
||||
|
||||
expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
|
||||
if (expected != fwsize) {
|
||||
if (edid_size(fwdata, fwsize) != fwsize) {
|
||||
DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
|
||||
"(expected %d, got %d)\n", name, expected, (int) fwsize);
|
||||
err = -EINVAL;
|
||||
goto relfw_out;
|
||||
"(expected %d, got %d\n", name,
|
||||
edid_size(fwdata, fwsize), (int)fwsize);
|
||||
edid = ERR_PTR(-EINVAL);
|
||||
goto out;
|
||||
}
|
||||
|
||||
edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
|
||||
if (edid == NULL) {
|
||||
err = -ENOMEM;
|
||||
goto relfw_out;
|
||||
edid = ERR_PTR(-ENOMEM);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
|
||||
@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
|
||||
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
|
||||
name);
|
||||
kfree(edid);
|
||||
err = -EINVAL;
|
||||
goto relfw_out;
|
||||
edid = ERR_PTR(-EINVAL);
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (i = 1; i <= edid[0x7e]; i++) {
|
||||
@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
|
||||
}
|
||||
|
||||
if (valid_extensions != edid[0x7e]) {
|
||||
u8 *new_edid;
|
||||
|
||||
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
|
||||
DRM_INFO("Found %d valid extensions instead of %d in EDID data "
|
||||
"\"%s\" for connector \"%s\"\n", valid_extensions,
|
||||
edid[0x7e], name, connector_name);
|
||||
edid[0x7e] = valid_extensions;
|
||||
|
||||
new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
|
||||
GFP_KERNEL);
|
||||
if (new_edid == NULL) {
|
||||
err = -ENOMEM;
|
||||
kfree(edid);
|
||||
goto relfw_out;
|
||||
}
|
||||
edid = new_edid;
|
||||
GFP_KERNEL);
|
||||
if (new_edid)
|
||||
edid = new_edid;
|
||||
}
|
||||
|
||||
DRM_INFO("Got %s EDID base block and %d extension%s from "
|
||||
@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
|
||||
"external", valid_extensions, valid_extensions == 1 ? "" : "s",
|
||||
name, connector_name);
|
||||
|
||||
relfw_out:
|
||||
release_firmware(fw);
|
||||
|
||||
out:
|
||||
if (err)
|
||||
return ERR_PTR(err);
|
||||
|
||||
if (fw)
|
||||
release_firmware(fw);
|
||||
return edid;
|
||||
}
|
||||
|
||||
|
@ -852,7 +852,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
|
||||
struct drm_fb_helper *fb_helper = info->par;
|
||||
struct drm_device *dev = fb_helper->dev;
|
||||
struct drm_mode_set *modeset;
|
||||
struct drm_crtc *crtc;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
@ -863,8 +862,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
|
||||
}
|
||||
|
||||
for (i = 0; i < fb_helper->crtc_count; i++) {
|
||||
crtc = fb_helper->crtc_info[i].mode_set.crtc;
|
||||
|
||||
modeset = &fb_helper->crtc_info[i].mode_set;
|
||||
|
||||
modeset->x = var->xoffset;
|
||||
@ -1360,7 +1357,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_helper_funcs *connector_funcs;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_fb_helper_crtc *best_crtc;
|
||||
int my_score, best_score, score;
|
||||
struct drm_fb_helper_crtc **crtcs, *crtc;
|
||||
struct drm_fb_helper_connector *fb_helper_conn;
|
||||
@ -1372,7 +1368,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
|
||||
connector = fb_helper_conn->connector;
|
||||
|
||||
best_crtcs[n] = NULL;
|
||||
best_crtc = NULL;
|
||||
best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
|
||||
if (modes[n] == NULL)
|
||||
return best_score;
|
||||
@ -1421,7 +1416,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
|
||||
score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
|
||||
width, height);
|
||||
if (score > best_score) {
|
||||
best_crtc = crtc;
|
||||
best_score = score;
|
||||
memcpy(best_crtcs, crtcs,
|
||||
dev->mode_config.num_connector *
|
||||
@ -1588,8 +1582,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
|
||||
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
|
||||
{
|
||||
struct drm_device *dev = fb_helper->dev;
|
||||
int count = 0;
|
||||
u32 max_width, max_height, bpp_sel;
|
||||
u32 max_width, max_height;
|
||||
|
||||
if (!fb_helper->fb)
|
||||
return 0;
|
||||
@ -1604,10 +1597,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
|
||||
|
||||
max_width = fb_helper->fb->width;
|
||||
max_height = fb_helper->fb->height;
|
||||
bpp_sel = fb_helper->fb->bits_per_pixel;
|
||||
|
||||
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
|
||||
max_height);
|
||||
drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
|
||||
mutex_unlock(&fb_helper->dev->mode_config.mutex);
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
|
@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
|
||||
retcode = drm_open_helper(inode, filp, dev);
|
||||
if (retcode)
|
||||
goto err_undo;
|
||||
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
|
||||
if (need_setup) {
|
||||
retcode = drm_setup(dev);
|
||||
if (retcode)
|
||||
@ -385,6 +384,71 @@ static void drm_events_release(struct drm_file *file_priv)
|
||||
spin_unlock_irqrestore(&dev->event_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* drm_legacy_dev_reinit
|
||||
*
|
||||
* Reinitializes a legacy/ums drm device in it's lastclose function.
|
||||
*/
|
||||
static void drm_legacy_dev_reinit(struct drm_device *dev)
|
||||
{
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
return;
|
||||
|
||||
atomic_set(&dev->ioctl_count, 0);
|
||||
atomic_set(&dev->vma_count, 0);
|
||||
|
||||
dev->sigdata.lock = NULL;
|
||||
|
||||
dev->context_flag = 0;
|
||||
dev->last_context = 0;
|
||||
dev->if_version = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Take down the DRM device.
|
||||
*
|
||||
* \param dev DRM device structure.
|
||||
*
|
||||
* Frees every resource in \p dev.
|
||||
*
|
||||
* \sa drm_device
|
||||
*/
|
||||
int drm_lastclose(struct drm_device * dev)
|
||||
{
|
||||
struct drm_vma_entry *vma, *vma_temp;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (dev->driver->lastclose)
|
||||
dev->driver->lastclose(dev);
|
||||
DRM_DEBUG("driver lastclose completed\n");
|
||||
|
||||
if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_irq_uninstall(dev);
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
drm_agp_clear(dev);
|
||||
|
||||
drm_legacy_sg_cleanup(dev);
|
||||
|
||||
/* Clear vma list (only built for debugging) */
|
||||
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
|
||||
list_del(&vma->head);
|
||||
kfree(vma);
|
||||
}
|
||||
|
||||
drm_legacy_dma_takedown(dev);
|
||||
|
||||
dev->dev_mapping = NULL;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
drm_legacy_dev_reinit(dev);
|
||||
|
||||
DRM_DEBUG("lastclose completed\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Release file.
|
||||
*
|
||||
@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp)
|
||||
|
||||
list_del(&pos->head);
|
||||
kfree(pos);
|
||||
--dev->ctx_count;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp)
|
||||
* End inline drm_release
|
||||
*/
|
||||
|
||||
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
|
||||
if (!--dev->open_count) {
|
||||
if (atomic_read(&dev->ioctl_count)) {
|
||||
DRM_ERROR("Device busy: %d\n",
|
||||
|
@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_private_object_init);
|
||||
|
||||
/**
|
||||
* Allocate a GEM object of the specified size with shmfs backing store
|
||||
*/
|
||||
struct drm_gem_object *
|
||||
drm_gem_object_alloc(struct drm_device *dev, size_t size)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
|
||||
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
|
||||
if (!obj)
|
||||
goto free;
|
||||
|
||||
if (drm_gem_object_init(dev, obj, size) != 0)
|
||||
goto free;
|
||||
|
||||
if (dev->driver->gem_init_object != NULL &&
|
||||
dev->driver->gem_init_object(obj) != 0) {
|
||||
goto fput;
|
||||
}
|
||||
return obj;
|
||||
fput:
|
||||
/* Object_init mangles the global counters - readjust them. */
|
||||
fput(obj->filp);
|
||||
free:
|
||||
kfree(obj);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_object_alloc);
|
||||
|
||||
static void
|
||||
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
|
||||
{
|
||||
|
@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
|
||||
{
|
||||
int ret;
|
||||
struct drm_global_item *item = &glob[ref->global_type];
|
||||
void *object;
|
||||
|
||||
mutex_lock(&item->mutex);
|
||||
if (item->refcount == 0) {
|
||||
@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
|
||||
}
|
||||
++item->refcount;
|
||||
ref->object = item->object;
|
||||
object = item->object;
|
||||
mutex_unlock(&item->mutex);
|
||||
return 0;
|
||||
out_err:
|
||||
|
@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
|
||||
seq_printf(m, "CRTC %d enable: %d\n",
|
||||
crtc, atomic_read(&dev->vblank_refcount[crtc]));
|
||||
crtc, atomic_read(&dev->vblank[crtc].refcount));
|
||||
seq_printf(m, "CRTC %d counter: %d\n",
|
||||
crtc, drm_vblank_count(dev, crtc));
|
||||
seq_printf(m, "CRTC %d last wait: %d\n",
|
||||
crtc, dev->last_vblank_wait[crtc]);
|
||||
crtc, dev->vblank[crtc].last_wait);
|
||||
seq_printf(m, "CRTC %d in modeset: %d\n",
|
||||
crtc, dev->vblank_inmodeset[crtc]);
|
||||
crtc, dev->vblank[crtc].inmodeset);
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return 0;
|
||||
|
@ -43,9 +43,8 @@
|
||||
#include <linux/export.h>
|
||||
|
||||
/* Access macro for slots in vblank timestamp ringbuffer. */
|
||||
#define vblanktimestamp(dev, crtc, count) ( \
|
||||
(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
|
||||
((count) % DRM_VBLANKTIME_RBSIZE)])
|
||||
#define vblanktimestamp(dev, crtc, count) \
|
||||
((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
|
||||
|
||||
/* Retry timestamp calculation up to 3 times to satisfy
|
||||
* drm_timestamp_precision before giving up.
|
||||
@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
|
||||
*/
|
||||
static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
|
||||
{
|
||||
memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
|
||||
DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
|
||||
memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
|
||||
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
|
||||
|
||||
dev->driver->disable_vblank(dev, crtc);
|
||||
dev->vblank_enabled[crtc] = 0;
|
||||
dev->vblank[crtc].enabled = false;
|
||||
|
||||
/* No further vblank irq's will be processed after
|
||||
* this point. Get current hardware vblank count and
|
||||
@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
|
||||
* delayed gpu counter increment.
|
||||
*/
|
||||
do {
|
||||
dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
|
||||
dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
|
||||
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
|
||||
} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
|
||||
} while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
|
||||
|
||||
if (!count)
|
||||
vblrc = 0;
|
||||
@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
|
||||
/* Compute time difference to stored timestamp of last vblank
|
||||
* as updated by last invocation of drm_handle_vblank() in vblank irq.
|
||||
*/
|
||||
vblcount = atomic_read(&dev->_vblank_count[crtc]);
|
||||
vblcount = atomic_read(&dev->vblank[crtc].count);
|
||||
diff_ns = timeval_to_ns(&tvblank) -
|
||||
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
|
||||
|
||||
@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
|
||||
* hope for the best.
|
||||
*/
|
||||
if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
|
||||
atomic_inc(&dev->_vblank_count[crtc]);
|
||||
atomic_inc(&dev->vblank[crtc].count);
|
||||
smp_mb__after_atomic_inc();
|
||||
}
|
||||
|
||||
@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
|
||||
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
|
||||
dev->vblank_enabled[i]) {
|
||||
if (atomic_read(&dev->vblank[i].refcount) == 0 &&
|
||||
dev->vblank[i].enabled) {
|
||||
DRM_DEBUG("disabling vblank on crtc %d\n", i);
|
||||
vblank_disable_and_save(dev, i);
|
||||
}
|
||||
@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
|
||||
|
||||
vblank_disable_fn((unsigned long)dev);
|
||||
|
||||
kfree(dev->vbl_queue);
|
||||
kfree(dev->_vblank_count);
|
||||
kfree(dev->vblank_refcount);
|
||||
kfree(dev->vblank_enabled);
|
||||
kfree(dev->last_vblank);
|
||||
kfree(dev->last_vblank_wait);
|
||||
kfree(dev->vblank_inmodeset);
|
||||
kfree(dev->_vblank_time);
|
||||
kfree(dev->vblank);
|
||||
|
||||
dev->num_crtcs = 0;
|
||||
}
|
||||
@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
||||
|
||||
dev->num_crtcs = num_crtcs;
|
||||
|
||||
dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
|
||||
GFP_KERNEL);
|
||||
if (!dev->vbl_queue)
|
||||
dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
|
||||
if (!dev->vblank)
|
||||
goto err;
|
||||
|
||||
dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
|
||||
if (!dev->_vblank_count)
|
||||
goto err;
|
||||
|
||||
dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
|
||||
GFP_KERNEL);
|
||||
if (!dev->vblank_refcount)
|
||||
goto err;
|
||||
|
||||
dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
|
||||
if (!dev->vblank_enabled)
|
||||
goto err;
|
||||
|
||||
dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
|
||||
if (!dev->last_vblank)
|
||||
goto err;
|
||||
|
||||
dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
|
||||
if (!dev->last_vblank_wait)
|
||||
goto err;
|
||||
|
||||
dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
|
||||
if (!dev->vblank_inmodeset)
|
||||
goto err;
|
||||
|
||||
dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
|
||||
sizeof(struct timeval), GFP_KERNEL);
|
||||
if (!dev->_vblank_time)
|
||||
goto err;
|
||||
for (i = 0; i < num_crtcs; i++)
|
||||
init_waitqueue_head(&dev->vblank[i].queue);
|
||||
|
||||
DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
|
||||
|
||||
@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
|
||||
else
|
||||
DRM_INFO("No driver support for vblank timestamp query.\n");
|
||||
|
||||
/* Zero per-crtc vblank stuff */
|
||||
for (i = 0; i < num_crtcs; i++) {
|
||||
init_waitqueue_head(&dev->vbl_queue[i]);
|
||||
atomic_set(&dev->_vblank_count[i], 0);
|
||||
atomic_set(&dev->vblank_refcount[i], 0);
|
||||
}
|
||||
dev->vblank_disable_allowed = false;
|
||||
|
||||
dev->vblank_disable_allowed = 0;
|
||||
return 0;
|
||||
|
||||
err:
|
||||
@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->irq_enabled = 1;
|
||||
dev->irq_enabled = true;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
|
||||
@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
if (!drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
vga_client_register(dev->pdev, NULL, NULL, NULL);
|
||||
@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
|
||||
int drm_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
int irq_enabled, i;
|
||||
bool irq_enabled;
|
||||
int i;
|
||||
|
||||
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
irq_enabled = dev->irq_enabled;
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/*
|
||||
@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
|
||||
if (dev->num_crtcs) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
DRM_WAKEUP(&dev->vbl_queue[i]);
|
||||
dev->vblank_enabled[i] = 0;
|
||||
dev->last_vblank[i] =
|
||||
DRM_WAKEUP(&dev->vblank[i].queue);
|
||||
dev->vblank[i].enabled = false;
|
||||
dev->vblank[i].last =
|
||||
dev->driver->get_vblank_counter(dev, i);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
|
||||
*/
|
||||
u32 drm_vblank_count(struct drm_device *dev, int crtc)
|
||||
{
|
||||
return atomic_read(&dev->_vblank_count[crtc]);
|
||||
return atomic_read(&dev->vblank[crtc].count);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_vblank_count);
|
||||
|
||||
@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
|
||||
* a seqlock.
|
||||
*/
|
||||
do {
|
||||
cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
|
||||
cur_vblank = atomic_read(&dev->vblank[crtc].count);
|
||||
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
|
||||
smp_rmb();
|
||||
} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
|
||||
} while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
|
||||
|
||||
return cur_vblank;
|
||||
}
|
||||
@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
|
||||
|
||||
/* Deal with counter wrap */
|
||||
diff = cur_vblank - dev->last_vblank[crtc];
|
||||
if (cur_vblank < dev->last_vblank[crtc]) {
|
||||
diff = cur_vblank - dev->vblank[crtc].last;
|
||||
if (cur_vblank < dev->vblank[crtc].last) {
|
||||
diff += dev->max_vblank_count;
|
||||
|
||||
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
|
||||
crtc, dev->last_vblank[crtc], cur_vblank, diff);
|
||||
crtc, dev->vblank[crtc].last, cur_vblank, diff);
|
||||
}
|
||||
|
||||
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
|
||||
@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
|
||||
* reinitialize delayed at next vblank interrupt in that case.
|
||||
*/
|
||||
if (rc) {
|
||||
tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
|
||||
tslot = atomic_read(&dev->vblank[crtc].count) + diff;
|
||||
vblanktimestamp(dev, crtc, tslot) = t_vblank;
|
||||
}
|
||||
|
||||
smp_mb__before_atomic_inc();
|
||||
atomic_add(diff, &dev->_vblank_count[crtc]);
|
||||
atomic_add(diff, &dev->vblank[crtc].count);
|
||||
smp_mb__after_atomic_inc();
|
||||
}
|
||||
|
||||
@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
|
||||
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
/* Going from 0->1 means we have to enable interrupts again */
|
||||
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
|
||||
if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
|
||||
spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
|
||||
if (!dev->vblank_enabled[crtc]) {
|
||||
if (!dev->vblank[crtc].enabled) {
|
||||
/* Enable vblank irqs under vblank_time_lock protection.
|
||||
* All vblank count & timestamp updates are held off
|
||||
* until we are done reinitializing master counter and
|
||||
@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
|
||||
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
|
||||
crtc, ret);
|
||||
if (ret)
|
||||
atomic_dec(&dev->vblank_refcount[crtc]);
|
||||
atomic_dec(&dev->vblank[crtc].refcount);
|
||||
else {
|
||||
dev->vblank_enabled[crtc] = 1;
|
||||
dev->vblank[crtc].enabled = true;
|
||||
drm_update_vblank_count(dev, crtc);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
|
||||
} else {
|
||||
if (!dev->vblank_enabled[crtc]) {
|
||||
atomic_dec(&dev->vblank_refcount[crtc]);
|
||||
if (!dev->vblank[crtc].enabled) {
|
||||
atomic_dec(&dev->vblank[crtc].refcount);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
}
|
||||
@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get);
|
||||
*/
|
||||
void drm_vblank_put(struct drm_device *dev, int crtc)
|
||||
{
|
||||
BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
|
||||
BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
|
||||
|
||||
/* Last user schedules interrupt disable */
|
||||
if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
|
||||
if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
|
||||
(drm_vblank_offdelay > 0))
|
||||
mod_timer(&dev->vblank_disable_timer,
|
||||
jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
|
||||
@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
|
||||
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
vblank_disable_and_save(dev, crtc);
|
||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||
DRM_WAKEUP(&dev->vblank[crtc].queue);
|
||||
|
||||
/* Send any queued vblank events, lest the natives grow disquiet */
|
||||
seq = drm_vblank_count_and_time(dev, crtc, &now);
|
||||
@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
|
||||
* to avoid corrupting the count if multiple, mismatch calls occur),
|
||||
* so that interrupts remain enabled in the interim.
|
||||
*/
|
||||
if (!dev->vblank_inmodeset[crtc]) {
|
||||
dev->vblank_inmodeset[crtc] = 0x1;
|
||||
if (!dev->vblank[crtc].inmodeset) {
|
||||
dev->vblank[crtc].inmodeset = 0x1;
|
||||
if (drm_vblank_get(dev, crtc) == 0)
|
||||
dev->vblank_inmodeset[crtc] |= 0x2;
|
||||
dev->vblank[crtc].inmodeset |= 0x2;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_vblank_pre_modeset);
|
||||
@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
|
||||
if (!dev->num_crtcs)
|
||||
return;
|
||||
|
||||
if (dev->vblank_inmodeset[crtc]) {
|
||||
if (dev->vblank[crtc].inmodeset) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
dev->vblank_disable_allowed = 1;
|
||||
dev->vblank_disable_allowed = true;
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
||||
if (dev->vblank_inmodeset[crtc] & 0x2)
|
||||
if (dev->vblank[crtc].inmodeset & 0x2)
|
||||
drm_vblank_put(dev, crtc);
|
||||
|
||||
dev->vblank_inmodeset[crtc] = 0;
|
||||
dev->vblank[crtc].inmodeset = 0;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(drm_vblank_post_modeset);
|
||||
@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
|
||||
|
||||
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
|
||||
vblwait->request.sequence, crtc);
|
||||
dev->last_vblank_wait[crtc] = vblwait->request.sequence;
|
||||
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
|
||||
dev->vblank[crtc].last_wait = vblwait->request.sequence;
|
||||
DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
|
||||
(((drm_vblank_count(dev, crtc) -
|
||||
vblwait->request.sequence) <= (1 << 23)) ||
|
||||
!dev->irq_enabled));
|
||||
@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
|
||||
|
||||
/* Vblank irq handling disabled. Nothing to do. */
|
||||
if (!dev->vblank_enabled[crtc]) {
|
||||
if (!dev->vblank[crtc].enabled) {
|
||||
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
|
||||
return false;
|
||||
}
|
||||
@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
*/
|
||||
|
||||
/* Get current timestamp and count. */
|
||||
vblcount = atomic_read(&dev->_vblank_count[crtc]);
|
||||
vblcount = atomic_read(&dev->vblank[crtc].count);
|
||||
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
|
||||
|
||||
/* Compute time difference to timestamp of last vblank */
|
||||
@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
|
||||
* the timestamp computed above.
|
||||
*/
|
||||
smp_mb__before_atomic_inc();
|
||||
atomic_inc(&dev->_vblank_count[crtc]);
|
||||
atomic_inc(&dev->vblank[crtc].count);
|
||||
smp_mb__after_atomic_inc();
|
||||
} else {
|
||||
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
|
||||
crtc, (int) diff_ns);
|
||||
}
|
||||
|
||||
DRM_WAKEUP(&dev->vbl_queue[crtc]);
|
||||
DRM_WAKEUP(&dev->vblank[crtc].queue);
|
||||
drm_handle_vblank_events(dev, crtc);
|
||||
|
||||
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
|
||||
|
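Note (not part of the commit): most of the drm_irq.c churn above comes from folding the old per-CRTC arrays (vbl_queue, _vblank_count, vblank_refcount, vblank_enabled, last_vblank, last_vblank_wait, vblank_inmodeset, _vblank_time) into a single dev->vblank[crtc] structure. The header change itself is not in this excerpt; the following is a sketch reconstructed purely from the accessors used in the diff and may differ in detail from the real struct in drmP.h:

/* Sketch of the consolidated per-CRTC vblank state (reconstructed). */
struct drm_vblank_crtc {
	wait_queue_head_t queue;	/* was dev->vbl_queue[crtc] */
	struct timeval time[DRM_VBLANKTIME_RBSIZE]; /* was dev->_vblank_time */
	atomic_t count;			/* was dev->_vblank_count[crtc] */
	atomic_t refcount;		/* was dev->vblank_refcount[crtc] */
	u32 last;			/* was dev->last_vblank[crtc] */
	u32 last_wait;			/* was dev->last_vblank_wait[crtc] */
	u32 inmodeset;			/* was dev->vblank_inmodeset[crtc] */
	bool enabled;			/* was dev->vblank_enabled[crtc] */
};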
@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
if (drm_lock_take(&master->lock, lock->context)) {
|
||||
master->lock.file_priv = file_priv;
|
||||
master->lock.lock_time = jiffies;
|
||||
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
|
||||
break; /* Got lock */
|
||||
}
|
||||
|
||||
@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
|
||||
|
||||
if (drm_lock_free(&master->lock, lock->context)) {
|
||||
/* FIXME: Should really bail out here. */
|
||||
}
|
||||
|
@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
dev = drm_dev_alloc(driver, &pdev->dev);
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = pci_enable_device(pdev);
|
||||
if (ret)
|
||||
goto err_g1;
|
||||
goto err_free;
|
||||
|
||||
dev->pdev = pdev;
|
||||
dev->dev = &pdev->dev;
|
||||
|
||||
dev->pci_device = pdev->device;
|
||||
dev->pci_vendor = pdev->vendor;
|
||||
|
||||
#ifdef __alpha__
|
||||
dev->hose = pdev->sysdata;
|
||||
#endif
|
||||
|
||||
mutex_lock(&drm_global_mutex);
|
||||
|
||||
if ((ret = drm_fill_in_dev(dev, ent, driver))) {
|
||||
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
|
||||
goto err_g2;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
pci_set_drvdata(pdev, dev);
|
||||
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
|
||||
if (ret)
|
||||
goto err_g2;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
|
||||
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
|
||||
if (ret)
|
||||
goto err_g21;
|
||||
}
|
||||
|
||||
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
|
||||
goto err_g3;
|
||||
|
||||
if (dev->driver->load) {
|
||||
ret = dev->driver->load(dev, ent->driver_data);
|
||||
if (ret)
|
||||
goto err_g4;
|
||||
}
|
||||
|
||||
/* setup the grouping for the legacy output */
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_mode_group_init_legacy_group(dev,
|
||||
&dev->primary->mode_group);
|
||||
if (ret)
|
||||
goto err_g4;
|
||||
}
|
||||
|
||||
list_add_tail(&dev->driver_item, &driver->device_list);
|
||||
ret = drm_dev_register(dev, ent->driver_data);
|
||||
if (ret)
|
||||
goto err_pci;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
driver->date, pci_name(pdev), dev->primary->index);
|
||||
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
return 0;
|
||||
|
||||
err_g4:
|
||||
drm_put_minor(&dev->primary);
|
||||
err_g3:
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
err_g21:
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_put_minor(&dev->control);
|
||||
err_g2:
|
||||
err_pci:
|
||||
pci_disable_device(pdev);
|
||||
err_g1:
|
||||
kfree(dev);
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
err_free:
|
||||
drm_dev_free(dev);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_get_pci_dev);
|
||||
|
@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
dev = drm_dev_alloc(driver, &platdev->dev);
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
dev->platformdev = platdev;
|
||||
dev->dev = &platdev->dev;
|
||||
|
||||
mutex_lock(&drm_global_mutex);
|
||||
|
||||
ret = drm_fill_in_dev(dev, NULL, driver);
|
||||
|
||||
if (ret) {
|
||||
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
|
||||
goto err_g1;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
|
||||
if (ret)
|
||||
goto err_g1;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
|
||||
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
|
||||
if (ret)
|
||||
goto err_g11;
|
||||
}
|
||||
|
||||
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
|
||||
ret = drm_dev_register(dev, 0);
|
||||
if (ret)
|
||||
goto err_g2;
|
||||
|
||||
if (dev->driver->load) {
|
||||
ret = dev->driver->load(dev, 0);
|
||||
if (ret)
|
||||
goto err_g3;
|
||||
}
|
||||
|
||||
/* setup the grouping for the legacy output */
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_mode_group_init_legacy_group(dev,
|
||||
&dev->primary->mode_group);
|
||||
if (ret)
|
||||
goto err_g3;
|
||||
}
|
||||
|
||||
list_add_tail(&dev->driver_item, &driver->device_list);
|
||||
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
goto err_free;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
|
||||
|
||||
return 0;
|
||||
|
||||
err_g3:
|
||||
drm_put_minor(&dev->primary);
|
||||
err_g2:
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
err_g11:
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_put_minor(&dev->control);
|
||||
err_g1:
|
||||
kfree(dev);
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
err_free:
|
||||
drm_dev_free(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
|
||||
unsigned count;
|
||||
struct scatterlist *sg;
|
||||
struct page *page;
|
||||
u32 len, offset;
|
||||
u32 len;
|
||||
int pg_index;
|
||||
dma_addr_t addr;
|
||||
|
||||
pg_index = 0;
|
||||
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
|
||||
len = sg->length;
|
||||
offset = sg->offset;
|
||||
page = sg_page(sg);
|
||||
addr = sg_dma_address(sg);
|
||||
|
||||
|
@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int drm_fill_in_dev(struct drm_device *dev,
|
||||
const struct pci_device_id *ent,
|
||||
struct drm_driver *driver)
|
||||
{
|
||||
int retcode;
|
||||
|
||||
INIT_LIST_HEAD(&dev->filelist);
|
||||
INIT_LIST_HEAD(&dev->ctxlist);
|
||||
INIT_LIST_HEAD(&dev->vmalist);
|
||||
INIT_LIST_HEAD(&dev->maplist);
|
||||
INIT_LIST_HEAD(&dev->vblank_event_list);
|
||||
|
||||
spin_lock_init(&dev->count_lock);
|
||||
spin_lock_init(&dev->event_lock);
|
||||
mutex_init(&dev->struct_mutex);
|
||||
mutex_init(&dev->ctxlist_mutex);
|
||||
|
||||
if (drm_ht_create(&dev->map_hash, 12)) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* the DRM has 6 basic counters */
|
||||
dev->counters = 6;
|
||||
dev->types[0] = _DRM_STAT_LOCK;
|
||||
dev->types[1] = _DRM_STAT_OPENS;
|
||||
dev->types[2] = _DRM_STAT_CLOSES;
|
||||
dev->types[3] = _DRM_STAT_IOCTLS;
|
||||
dev->types[4] = _DRM_STAT_LOCKS;
|
||||
dev->types[5] = _DRM_STAT_UNLOCKS;
|
||||
|
||||
dev->driver = driver;
|
||||
|
||||
if (dev->driver->bus->agp_init) {
|
||||
retcode = dev->driver->bus->agp_init(dev);
|
||||
if (retcode)
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
|
||||
|
||||
retcode = drm_ctxbitmap_init(dev);
|
||||
if (retcode) {
|
||||
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
|
||||
goto error_out_unreg;
|
||||
}
|
||||
|
||||
if (driver->driver_features & DRIVER_GEM) {
|
||||
retcode = drm_gem_init(dev);
|
||||
if (retcode) {
|
||||
DRM_ERROR("Cannot initialize graphics execution "
|
||||
"manager (GEM)\n");
|
||||
goto error_out_unreg;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_out_unreg:
|
||||
drm_lastclose(dev);
|
||||
return retcode;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_fill_in_dev);
|
||||
|
||||
|
||||
/**
|
||||
* Get a secondary minor number.
|
||||
*
|
||||
@ -427,47 +363,15 @@ static void drm_unplug_minor(struct drm_minor *minor)
|
||||
*/
|
||||
void drm_put_dev(struct drm_device *dev)
|
||||
{
|
||||
struct drm_driver *driver;
|
||||
struct drm_map_list *r_list, *list_temp;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
if (!dev) {
|
||||
DRM_ERROR("cleanup called no dev\n");
|
||||
return;
|
||||
}
|
||||
driver = dev->driver;
|
||||
|
||||
drm_lastclose(dev);
|
||||
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
|
||||
if (dev->driver->bus->agp_destroy)
|
||||
dev->driver->bus->agp_destroy(dev);
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
|
||||
drm_rmmap(dev, r_list->map);
|
||||
drm_ht_remove(&dev->map_hash);
|
||||
|
||||
drm_ctxbitmap_cleanup(dev);
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
drm_put_minor(&dev->control);
|
||||
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
|
||||
if (driver->driver_features & DRIVER_GEM)
|
||||
drm_gem_destroy(dev);
|
||||
|
||||
drm_put_minor(&dev->primary);
|
||||
|
||||
list_del(&dev->driver_item);
|
||||
kfree(dev->devname);
|
||||
kfree(dev);
|
||||
drm_dev_unregister(dev);
|
||||
drm_dev_free(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_put_dev);
|
||||
|
||||
@ -490,3 +394,206 @@ void drm_unplug_dev(struct drm_device *dev)
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_unplug_dev);
|
||||
|
||||
/**
|
||||
* drm_dev_alloc - Allocate new drm device
|
||||
* @driver: DRM driver to allocate device for
|
||||
* @parent: Parent device object
|
||||
*
|
||||
* Allocate and initialize a new DRM device. No device registration is done.
|
||||
* Call drm_dev_register() to advertice the device to user space and register it
|
||||
* with other core subsystems.
|
||||
*
|
||||
* RETURNS:
|
||||
* Pointer to new DRM device, or NULL if out of memory.
|
||||
*/
|
||||
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
|
||||
struct device *parent)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
int ret;
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
if (!dev)
|
||||
return NULL;
|
||||
|
||||
dev->dev = parent;
|
||||
dev->driver = driver;
|
||||
|
||||
INIT_LIST_HEAD(&dev->filelist);
|
||||
INIT_LIST_HEAD(&dev->ctxlist);
|
||||
INIT_LIST_HEAD(&dev->vmalist);
|
||||
INIT_LIST_HEAD(&dev->maplist);
|
||||
INIT_LIST_HEAD(&dev->vblank_event_list);
|
||||
|
||||
spin_lock_init(&dev->count_lock);
|
||||
spin_lock_init(&dev->event_lock);
|
||||
mutex_init(&dev->struct_mutex);
|
||||
mutex_init(&dev->ctxlist_mutex);
|
||||
|
||||
if (drm_ht_create(&dev->map_hash, 12))
|
||||
goto err_free;
|
||||
|
||||
ret = drm_ctxbitmap_init(dev);
|
||||
if (ret) {
|
||||
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
|
||||
goto err_ht;
|
||||
}
|
||||
|
||||
if (driver->driver_features & DRIVER_GEM) {
|
||||
ret = drm_gem_init(dev);
|
||||
if (ret) {
|
||||
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
|
||||
goto err_ctxbitmap;
|
||||
}
|
||||
}
|
||||
|
||||
return dev;
|
||||
|
||||
err_ctxbitmap:
|
||||
drm_ctxbitmap_cleanup(dev);
|
||||
err_ht:
|
||||
drm_ht_remove(&dev->map_hash);
|
||||
err_free:
|
||||
kfree(dev);
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dev_alloc);
|
||||
|
||||
/**
|
||||
* drm_dev_free - Free DRM device
|
||||
* @dev: DRM device to free
|
||||
*
|
||||
* Free a DRM device that has previously been allocated via drm_dev_alloc().
|
||||
* You must not use kfree() instead or you will leak memory.
|
||||
*
|
||||
* This must not be called once the device got registered. Use drm_put_dev()
|
||||
* instead, which then calls drm_dev_free().
|
||||
*/
|
||||
void drm_dev_free(struct drm_device *dev)
|
||||
{
|
||||
if (dev->driver->driver_features & DRIVER_GEM)
|
||||
drm_gem_destroy(dev);
|
||||
|
||||
drm_ctxbitmap_cleanup(dev);
|
||||
drm_ht_remove(&dev->map_hash);
|
||||
|
||||
kfree(dev->devname);
|
||||
kfree(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dev_free);
|
||||
|
||||
/**
|
||||
* drm_dev_register - Register DRM device
|
||||
* @dev: Device to register
|
||||
*
|
||||
* Register the DRM device @dev with the system, advertise device to user-space
|
||||
* and start normal device operation. @dev must be allocated via drm_dev_alloc()
|
||||
* previously.
|
||||
*
|
||||
* Never call this twice on any device!
|
||||
*
|
||||
* RETURNS:
|
||||
* 0 on success, negative error code on failure.
|
||||
*/
|
||||
int drm_dev_register(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
int ret;
|
||||
|
||||
mutex_lock(&drm_global_mutex);
|
||||
|
||||
if (dev->driver->bus->agp_init) {
|
||||
ret = dev->driver->bus->agp_init(dev);
|
||||
if (ret)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
|
||||
if (ret)
|
||||
goto err_agp;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
|
||||
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
|
||||
if (ret)
|
||||
goto err_control_node;
|
||||
}
|
||||
|
||||
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
|
||||
if (ret)
|
||||
goto err_render_node;
|
||||
|
||||
if (dev->driver->load) {
|
||||
ret = dev->driver->load(dev, flags);
|
||||
if (ret)
|
||||
goto err_primary_node;
|
||||
}
|
||||
|
||||
/* setup grouping for legacy outputs */
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = drm_mode_group_init_legacy_group(dev,
|
||||
&dev->primary->mode_group);
|
||||
if (ret)
|
||||
goto err_unload;
|
||||
}
|
||||
|
||||
list_add_tail(&dev->driver_item, &dev->driver->device_list);
|
||||
|
||||
ret = 0;
|
||||
goto out_unlock;
|
||||
|
||||
err_unload:
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
err_primary_node:
|
||||
drm_put_minor(&dev->primary);
|
||||
err_render_node:
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
err_control_node:
|
||||
if (dev->control)
|
||||
drm_put_minor(&dev->control);
|
||||
err_agp:
|
||||
if (dev->driver->bus->agp_destroy)
|
||||
dev->driver->bus->agp_destroy(dev);
|
||||
out_unlock:
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dev_register);
|
||||
|
||||
/**
|
||||
* drm_dev_unregister - Unregister DRM device
|
||||
* @dev: Device to unregister
|
||||
*
|
||||
* Unregister the DRM device from the system. This does the reverse of
|
||||
* drm_dev_register() but does not deallocate the device. The caller must call
|
||||
* drm_dev_free() to free all resources.
|
||||
*/
|
||||
void drm_dev_unregister(struct drm_device *dev)
|
||||
{
|
||||
struct drm_map_list *r_list, *list_temp;
|
||||
|
||||
drm_lastclose(dev);
|
||||
|
||||
if (dev->driver->unload)
|
||||
dev->driver->unload(dev);
|
||||
|
||||
if (dev->driver->bus->agp_destroy)
|
||||
dev->driver->bus->agp_destroy(dev);
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
|
||||
drm_rmmap(dev, r_list->map);
|
||||
|
||||
if (dev->control)
|
||||
drm_put_minor(&dev->control);
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
drm_put_minor(&dev->primary);
|
||||
|
||||
list_del(&dev->driver_item);
|
||||
}
|
||||
EXPORT_SYMBOL(drm_dev_unregister);
|
||||
|
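Note (not part of the commit): the drm_dev_alloc()/drm_dev_register()/drm_dev_unregister()/drm_dev_free() entry points added above are exactly what the PCI, platform and USB bus glue in this merge is converted to. A minimal sketch of the new flow from a hypothetical bus driver's point of view (example_probe/example_remove and my_driver are placeholders, not code from this merge):

#include <drm/drmP.h>

/* Sketch only: allocate, register, and on failure free via drm_dev_free(). */
static int example_probe(struct device *parent, struct drm_driver *my_driver,
			 unsigned long flags)
{
	struct drm_device *ddev;
	int ret;

	ddev = drm_dev_alloc(my_driver, parent);
	if (!ddev)
		return -ENOMEM;

	ret = drm_dev_register(ddev, flags);	/* minors, ->load(), groups */
	if (ret) {
		drm_dev_free(ddev);		/* never plain kfree() */
		return ret;
	}
	return 0;
}

static void example_remove(struct drm_device *ddev)
{
	drm_put_dev(ddev);	/* drm_dev_unregister() + drm_dev_free() */
}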
@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
|
||||
struct drm_driver *driver)
|
||||
{
|
||||
struct drm_device *dev;
|
||||
struct usb_device *usbdev;
|
||||
int ret;
|
||||
|
||||
DRM_DEBUG("\n");
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
dev = drm_dev_alloc(driver, &interface->dev);
|
||||
if (!dev)
|
||||
return -ENOMEM;
|
||||
|
||||
usbdev = interface_to_usbdev(interface);
|
||||
dev->usbdev = usbdev;
|
||||
dev->dev = &interface->dev;
|
||||
|
||||
mutex_lock(&drm_global_mutex);
|
||||
|
||||
ret = drm_fill_in_dev(dev, NULL, driver);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
|
||||
goto err_g1;
|
||||
}
|
||||
|
||||
dev->usbdev = interface_to_usbdev(interface);
|
||||
usb_set_intfdata(interface, dev);
|
||||
ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
|
||||
|
||||
ret = drm_dev_register(dev, 0);
|
||||
if (ret)
|
||||
goto err_g1;
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
|
||||
ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
|
||||
if (ret)
|
||||
goto err_g11;
|
||||
}
|
||||
|
||||
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
|
||||
if (ret)
|
||||
goto err_g2;
|
||||
|
||||
if (dev->driver->load) {
|
||||
ret = dev->driver->load(dev, 0);
|
||||
if (ret)
|
||||
goto err_g3;
|
||||
}
|
||||
|
||||
/* setup the grouping for the legacy output */
|
||||
ret = drm_mode_group_init_legacy_group(dev,
|
||||
&dev->primary->mode_group);
|
||||
if (ret)
|
||||
goto err_g3;
|
||||
|
||||
list_add_tail(&dev->driver_item, &driver->device_list);
|
||||
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
goto err_free;
|
||||
|
||||
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
|
||||
driver->name, driver->major, driver->minor, driver->patchlevel,
|
||||
@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
|
||||
|
||||
return 0;
|
||||
|
||||
err_g3:
|
||||
drm_put_minor(&dev->primary);
|
||||
err_g2:
|
||||
if (dev->render)
|
||||
drm_put_minor(&dev->render);
|
||||
err_g11:
|
||||
drm_put_minor(&dev->control);
|
||||
err_g1:
|
||||
kfree(dev);
|
||||
mutex_unlock(&drm_global_mutex);
|
||||
err_free:
|
||||
drm_dev_free(dev);
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
|
||||
.get_vblank_counter = drm_vblank_count,
|
||||
.enable_vblank = exynos_drm_crtc_enable_vblank,
|
||||
.disable_vblank = exynos_drm_crtc_disable_vblank,
|
||||
.gem_init_object = exynos_drm_gem_init_object,
|
||||
.gem_free_object = exynos_drm_gem_free_object,
|
||||
.gem_vm_ops = &exynos_drm_gem_vm_ops,
|
||||
.dumb_create = exynos_drm_gem_dumb_create,
|
||||
|
@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/*
	 * enable drm irq mode.
	 * - with irq_enabled = 1, we can use the vblank feature.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * P.S. note that we wouldn't use drm irq handler but
	 *	just specific driver own one instead because
	 *	drm framework supports only one irq handler.
	 */
	drm_dev->irq_enabled = 1;
	drm_dev->irq_enabled = true;

	/*
	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
	 * by drm timer once a current process gives up ownership of
	 * vblank event.(after drm_vblank_put function is called)
	 */
	drm_dev->vblank_disable_allowed = 1;
	drm_dev->vblank_disable_allowed = true;

	/* attach this sub driver to iommu mapping if supported. */
	if (is_drm_iommu_supported(drm_dev))
@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv);

/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
{
	struct vidi_context *ctx = get_vidi_context(dev);
	struct edid *edid;
	int edid_len;

	/*
	 * the edid data comes from user side and it would be set
@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
		return ERR_PTR(-EFAULT);
	}

	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
	edid = drm_edid_duplicate(ctx->raw_edid);
	if (!edid) {
		DRM_DEBUG_KMS("failed to allocate edid\n");
		return ERR_PTR(-ENOMEM);
@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/*
	 * enable drm irq mode.
	 * - with irq_enabled = 1, we can use the vblank feature.
	 * - with irq_enabled = true, we can use the vblank feature.
	 *
	 * P.S. note that we wouldn't use drm irq handler but
	 *	just specific driver own one instead because
	 *	drm framework supports only one irq handler.
	 */
	drm_dev->irq_enabled = 1;
	drm_dev->irq_enabled = true;

	/*
	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
	 * with vblank_disable_allowed = true, vblank interrupt will be disabled
	 * by drm timer once a current process gives up ownership of
	 * vblank event.(after drm_vblank_put function is called)
	 */
	drm_dev->vblank_disable_allowed = 1;
	drm_dev->vblank_disable_allowed = true;

	return 0;
}
@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
	struct exynos_drm_manager *manager;
	struct exynos_drm_display_ops *display_ops;
	struct drm_exynos_vidi_connection *vidi = data;
	int edid_len;

	if (!vidi) {
		DRM_DEBUG_KMS("user data for vidi is null.\n");
@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
			DRM_DEBUG_KMS("edid data is invalid.\n");
			return -EINVAL;
		}
		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
		ctx->raw_edid = drm_edid_duplicate(raw_edid);
		if (!ctx->raw_edid) {
			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
			return -ENOMEM;
@ -29,11 +29,6 @@
#include <drm/drm_vma_manager.h>
#include "psb_drv.h"

int psb_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;
}

void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)

	drm_irq_install(dev);

	dev->vblank_disable_allowed = 1;
	dev->vblank_disable_allowed = true;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

@ -646,7 +646,6 @@ static struct drm_driver driver = {
	.preclose = psb_driver_preclose,
	.postclose = psb_driver_close,

	.gem_init_object = psb_gem_init_object,
	.gem_free_object = psb_gem_free_object,
	.gem_vm_ops = &psb_gem_vm_ops,
	.dumb_create = psb_gem_dumb_create,
@ -44,10 +44,10 @@ enum {
	CHIP_MFLD_0130 = 3, /* Medfield */
};

#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)

/*
 * Driver definitions
@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;

/* gem.c */
extern int psb_gem_init_object(struct drm_gem_object *obj);
extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
			struct drm_file *file);
@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
|
||||
|
||||
if (gma_power_is_on(dev))
|
||||
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
|
||||
if (dev->vblank_enabled[0])
|
||||
if (dev->vblank[0].enabled)
|
||||
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
|
||||
if (dev->vblank_enabled[1])
|
||||
if (dev->vblank[1].enabled)
|
||||
dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
|
||||
|
||||
/* FIXME: Handle Medfield irq mask
|
||||
if (dev->vblank_enabled[1])
|
||||
if (dev->vblank[1].enabled)
|
||||
dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
|
||||
if (dev->vblank_enabled[2])
|
||||
if (dev->vblank[2].enabled)
|
||||
dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
|
||||
*/
|
||||
|
||||
@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
|
||||
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
|
||||
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
|
||||
|
||||
if (dev->vblank_enabled[0])
|
||||
if (dev->vblank[0].enabled)
|
||||
psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
else
|
||||
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
if (dev->vblank_enabled[1])
|
||||
if (dev->vblank[1].enabled)
|
||||
psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
else
|
||||
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
if (dev->vblank_enabled[2])
|
||||
if (dev->vblank[2].enabled)
|
||||
psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
else
|
||||
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
|
||||
|
||||
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
|
||||
|
||||
if (dev->vblank_enabled[0])
|
||||
if (dev->vblank[0].enabled)
|
||||
psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
if (dev->vblank_enabled[1])
|
||||
if (dev->vblank[1].enabled)
|
||||
psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
if (dev->vblank_enabled[2])
|
||||
if (dev->vblank[2].enabled)
|
||||
psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
|
||||
|
||||
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
|
||||
@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
|
||||
{
|
||||
unsigned int cur_vblank;
|
||||
int ret = 0;
|
||||
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
|
||||
DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
|
||||
(((cur_vblank = atomic_read(counter))
|
||||
- *sequence) <= (1 << 23)));
|
||||
*sequence = cur_vblank;
|
||||
|
@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
|
||||
dma->buflist[vertex->idx],
|
||||
vertex->discard, vertex->used);
|
||||
|
||||
atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
||||
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
|
||||
sarea_priv->last_enqueue = dev_priv->counter - 1;
|
||||
sarea_priv->last_dispatch = (int)hw_status[5];
|
||||
|
||||
@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
|
||||
i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
|
||||
mc->last_render);
|
||||
|
||||
atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
|
||||
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
|
||||
sarea_priv->last_enqueue = dev_priv->counter - 1;
|
||||
sarea_priv->last_dispatch = (int)hw_status[5];
|
||||
|
||||
@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
|
||||
|
||||
int i810_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
{
|
||||
/* i810 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
pci_set_master(dev->pdev);
|
||||
|
||||
return 0;
|
||||
|
@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
|
||||
value = READ_BREADCRUMB(dev_priv);
|
||||
break;
|
||||
case I915_PARAM_CHIPSET_ID:
|
||||
value = dev->pci_device;
|
||||
value = dev->pdev->device;
|
||||
break;
|
||||
case I915_PARAM_HAS_GEM:
|
||||
value = 1;
|
||||
@ -1333,7 +1333,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
|
||||
|
||||
/* Always safe in the mode setting case. */
|
||||
/* FIXME: do pre/post-mode set stuff in core KMS code */
|
||||
dev->vblank_disable_allowed = 1;
|
||||
dev->vblank_disable_allowed = true;
|
||||
if (INTEL_INFO(dev)->num_pipes == 0) {
|
||||
intel_display_power_put(dev, POWER_DOMAIN_VGA);
|
||||
return 0;
|
||||
@ -1480,13 +1480,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
|
||||
return -ENODEV;
|
||||
|
||||
/* i915 has 4 more counters */
|
||||
dev->counters += 4;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
dev->types[9] = _DRM_STAT_DMA;
|
||||
|
||||
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
|
||||
if (dev_priv == NULL)
|
||||
return -ENOMEM;
|
||||
|
@ -940,7 +940,6 @@ static struct drm_driver driver = {
|
||||
.debugfs_init = i915_debugfs_init,
|
||||
.debugfs_cleanup = i915_debugfs_cleanup,
|
||||
#endif
|
||||
.gem_init_object = i915_gem_init_object,
|
||||
.gem_free_object = i915_gem_free_object,
|
||||
.gem_vm_ops = &i915_gem_vm_ops,
|
||||
|
||||
|
@ -1615,39 +1615,39 @@ struct drm_i915_file_private {
|
||||
|
||||
#define INTEL_INFO(dev) (to_i915(dev)->info)
|
||||
|
||||
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
|
||||
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
|
||||
#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
|
||||
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
|
||||
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
|
||||
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
|
||||
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
|
||||
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
|
||||
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
|
||||
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
|
||||
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
|
||||
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
|
||||
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
|
||||
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
|
||||
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
|
||||
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
|
||||
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
|
||||
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
|
||||
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
|
||||
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
|
||||
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
|
||||
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
|
||||
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
|
||||
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
|
||||
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
|
||||
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
|
||||
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
|
||||
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
|
||||
(dev)->pci_device == 0x0152 || \
|
||||
(dev)->pci_device == 0x015a)
|
||||
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
|
||||
(dev)->pci_device == 0x0106 || \
|
||||
(dev)->pci_device == 0x010A)
|
||||
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
|
||||
(dev)->pdev->device == 0x0152 || \
|
||||
(dev)->pdev->device == 0x015a)
|
||||
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
|
||||
(dev)->pdev->device == 0x0106 || \
|
||||
(dev)->pdev->device == 0x010A)
|
||||
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
|
||||
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
|
||||
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
|
||||
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pci_device & 0xFF00) == 0x0C00)
|
||||
((dev)->pdev->device & 0xFF00) == 0x0C00)
|
||||
#define IS_ULT(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pci_device & 0xFF00) == 0x0A00)
|
||||
((dev)->pdev->device & 0xFF00) == 0x0A00)
|
||||
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
|
||||
((dev)->pci_device & 0x00F0) == 0x0020)
|
||||
((dev)->pdev->device & 0x00F0) == 0x0020)
|
||||
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
|
||||
|
||||
/*
|
||||
@ -1874,7 +1874,6 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
|
||||
void i915_gem_load(struct drm_device *dev);
|
||||
void *i915_gem_object_alloc(struct drm_device *dev);
|
||||
void i915_gem_object_free(struct drm_i915_gem_object *obj);
|
||||
int i915_gem_init_object(struct drm_gem_object *obj);
|
||||
void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
const struct drm_i915_gem_object_ops *ops);
|
||||
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
|
||||
|
@ -4148,13 +4148,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
|
||||
return obj;
|
||||
}
|
||||
|
||||
int i915_gem_init_object(struct drm_gem_object *obj)
|
||||
{
|
||||
BUG();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void i915_gem_free_object(struct drm_gem_object *gem_obj)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
|
||||
|
@ -304,7 +304,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
|
||||
err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
|
||||
error->time.tv_usec);
|
||||
err_printf(m, "Kernel: " UTS_RELEASE "\n");
|
||||
err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
|
||||
err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
|
||||
err_printf(m, "EIR: 0x%08x\n", error->eir);
|
||||
err_printf(m, "IER: 0x%08x\n", error->ier);
|
||||
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
|
||||
|
@ -316,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
|
||||
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
|
||||
intel_write_eld(&encoder->base, adjusted_mode);
|
||||
}
|
||||
|
||||
intel_dp_init_link_config(intel_dp);
|
||||
|
||||
} else if (type == INTEL_OUTPUT_HDMI) {
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
|
||||
@ -1222,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
|
||||
|
||||
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
|
||||
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
|
||||
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
|
||||
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
|
||||
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
|
||||
I915_WRITE(DP_TP_CTL(port), val);
|
||||
POSTING_READ(DP_TP_CTL(port));
|
||||
|
@ -884,21 +884,6 @@ found:
|
||||
return true;
|
||||
}
|
||||
|
||||
void intel_dp_init_link_config(struct intel_dp *intel_dp)
|
||||
{
|
||||
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
|
||||
intel_dp->link_configuration[0] = intel_dp->link_bw;
|
||||
intel_dp->link_configuration[1] = intel_dp->lane_count;
|
||||
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
|
||||
/*
|
||||
* Check for DPCD version > 1.1 and enhanced framing support
|
||||
*/
|
||||
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
|
||||
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
|
||||
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
|
||||
}
|
||||
}
|
||||
|
||||
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
|
||||
{
|
||||
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
|
||||
@ -971,8 +956,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
|
||||
intel_write_eld(&encoder->base, adjusted_mode);
|
||||
}
|
||||
|
||||
intel_dp_init_link_config(intel_dp);
|
||||
|
||||
/* Split out the IBX/CPU vs CPT settings */
|
||||
|
||||
if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
|
||||
@ -982,7 +965,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
|
||||
intel_dp->DP |= DP_SYNC_VS_HIGH;
|
||||
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
|
||||
|
||||
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
|
||||
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
|
||||
intel_dp->DP |= DP_ENHANCED_FRAMING;
|
||||
|
||||
intel_dp->DP |= crtc->pipe << 29;
|
||||
@ -996,7 +979,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
|
||||
intel_dp->DP |= DP_SYNC_VS_HIGH;
|
||||
intel_dp->DP |= DP_LINK_TRAIN_OFF;
|
||||
|
||||
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
|
||||
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
|
||||
intel_dp->DP |= DP_ENHANCED_FRAMING;
|
||||
|
||||
if (crtc->pipe == 1)
|
||||
@ -2474,14 +2457,21 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
|
||||
uint8_t voltage;
|
||||
int voltage_tries, loop_tries;
|
||||
uint32_t DP = intel_dp->DP;
|
||||
uint8_t link_config[2];
|
||||
|
||||
if (HAS_DDI(dev))
|
||||
intel_ddi_prepare_link_retrain(encoder);
|
||||
|
||||
/* Write the link configuration data */
|
||||
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
|
||||
intel_dp->link_configuration,
|
||||
DP_LINK_CONFIGURATION_SIZE);
|
||||
link_config[0] = intel_dp->link_bw;
|
||||
link_config[1] = intel_dp->lane_count;
|
||||
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
|
||||
link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
|
||||
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
|
||||
|
||||
link_config[0] = 0;
|
||||
link_config[1] = DP_SET_ANSI_8B10B;
|
||||
intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
|
||||
|
||||
DP |= DP_PORT_EN;
|
||||
|
||||
@ -2862,7 +2852,6 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	bool hpd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
@ -2873,8 +2862,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
	if (hpd) {
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
@ -2888,9 +2877,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
	if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
		return connector_status_unknown;
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@ -2964,19 +2962,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
|
||||
|
||||
/* use cached edid if we have one */
|
||||
if (intel_connector->edid) {
|
||||
struct edid *edid;
|
||||
int size;
|
||||
|
||||
/* invalid edid */
|
||||
if (IS_ERR(intel_connector->edid))
|
||||
return NULL;
|
||||
|
||||
size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
|
||||
edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
|
||||
if (!edid)
|
||||
return NULL;
|
||||
|
||||
return edid;
|
||||
return drm_edid_duplicate(intel_connector->edid);
|
||||
}
|
||||
|
||||
return drm_get_edid(connector, adapter);
|
||||
|
@ -436,13 +436,11 @@ struct intel_hdmi {
|
||||
};
|
||||
|
||||
#define DP_MAX_DOWNSTREAM_PORTS 0x10
|
||||
#define DP_LINK_CONFIGURATION_SIZE 9
|
||||
|
||||
struct intel_dp {
|
||||
uint32_t output_reg;
|
||||
uint32_t aux_ch_ctl_reg;
|
||||
uint32_t DP;
|
||||
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
|
||||
bool has_audio;
|
||||
enum hdmi_force_audio force_audio;
|
||||
uint32_t color_range;
|
||||
@ -685,7 +683,6 @@ void i915_disable_vga_mem(struct drm_device *dev);
|
||||
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
|
||||
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
|
||||
struct intel_connector *intel_connector);
|
||||
void intel_dp_init_link_config(struct intel_dp *intel_dp);
|
||||
void intel_dp_start_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
|
||||
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
|
||||
|
@ -5423,7 +5423,7 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
for_each_pipe(p)
|
||||
if (p != PIPE_A)
|
||||
dev->last_vblank[p] = 0;
|
||||
dev->vblank[p].last = 0;
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
}
|
||||
}
|
||||
|
@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
|
||||
tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
|
||||
|
||||
/* Enable two fixes for the chips that need them. */
|
||||
if (dev->pci_device < 0x2772)
|
||||
if (dev->pdev->device < 0x2772)
|
||||
tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
|
||||
|
||||
I915_WRITE(TV_H_CTL_1, hctl1);
|
||||
|
@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
|
||||
dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
|
||||
|
||||
dev->counters += 3;
|
||||
dev->types[6] = _DRM_STAT_IRQ;
|
||||
dev->types[7] = _DRM_STAT_PRIMARY;
|
||||
dev->types[8] = _DRM_STAT_SECONDARY;
|
||||
|
||||
ret = drm_vblank_init(dev, 1);
|
||||
|
||||
if (ret) {
|
||||
|
@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
|
||||
/* Disable *all* interrupts */
|
||||
MGA_WRITE(MGA_IEN, 0);
|
||||
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
}
|
||||
|
@ -99,7 +99,6 @@ static struct drm_driver driver = {
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
|
||||
.gem_init_object = mgag200_gem_init_object,
|
||||
.gem_free_object = mgag200_gem_free_object,
|
||||
.dumb_create = mgag200_dumb_create,
|
||||
.dumb_map_offset = mgag200_dumb_mmap_offset,
|
||||
|
@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
|
||||
int mgag200_gem_create(struct drm_device *dev,
|
||||
u32 size, bool iskernel,
|
||||
struct drm_gem_object **obj);
|
||||
int mgag200_gem_init_object(struct drm_gem_object *obj);
|
||||
int mgag200_dumb_create(struct drm_file *file,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mgag200_gem_init_object(struct drm_gem_object *obj)
|
||||
{
|
||||
BUG();
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mgag200_bo_unref(struct mgag200_bo **bo)
|
||||
{
|
||||
struct ttm_buffer_object *tbo;
|
||||
|
@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
|
||||
sim_data.nvclk_khz = NVClk;
|
||||
sim_data.bpp = bpp;
|
||||
sim_data.two_heads = nv_two_heads(dev);
|
||||
if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
|
||||
(dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
|
||||
if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
|
||||
(dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
|
||||
uint32_t type;
|
||||
|
||||
pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
|
||||
@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
|
||||
|
||||
if (nv_device(drm->device)->card_type < NV_20)
|
||||
nv04_update_arb(dev, vclk, bpp, burst, lwm);
|
||||
else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
|
||||
(dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
|
||||
else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
|
||||
(dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
|
||||
*burst = 128;
|
||||
*lwm = 0x0480;
|
||||
} else
|
||||
|
@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
|
||||
/* BIOS scripts usually take care of the backlight, thanks
|
||||
* Apple for your consistency.
|
||||
*/
|
||||
if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
|
||||
dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
|
||||
if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
|
||||
dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
|
||||
if (mode == DRM_MODE_DPMS_ON) {
|
||||
nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
|
||||
nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
|
||||
|
@ -127,7 +127,7 @@ static inline bool
|
||||
nv_two_heads(struct drm_device *dev)
|
||||
{
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
const int impl = dev->pci_device & 0x0ff0;
|
||||
const int impl = dev->pdev->device & 0x0ff0;
|
||||
|
||||
if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
|
||||
impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
|
||||
@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev)
|
||||
static inline bool
|
||||
nv_gf4_disp_arch(struct drm_device *dev)
|
||||
{
|
||||
return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
|
||||
return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
nv_two_reg_pll(struct drm_device *dev)
|
||||
{
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
const int impl = dev->pci_device & 0x0ff0;
|
||||
const int impl = dev->pdev->device & 0x0ff0;
|
||||
|
||||
if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
|
||||
return true;
|
||||
|
@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
|
||||
int ret;
|
||||
|
||||
if (plltype == PLL_MEMORY &&
|
||||
(dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
|
||||
(dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
|
||||
uint32_t mpllP;
|
||||
|
||||
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
|
||||
@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
|
||||
return 400000 / mpllP;
|
||||
} else
|
||||
if (plltype == PLL_MEMORY &&
|
||||
(dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
|
||||
(dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
|
||||
uint32_t clock;
|
||||
|
||||
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
|
||||
|
@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
|
||||
if (chan->ntfy) {
|
||||
nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
|
||||
nouveau_bo_unpin(chan->ntfy);
|
||||
drm_gem_object_unreference_unlocked(chan->ntfy->gem);
|
||||
drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
|
||||
}
|
||||
|
||||
if (chan->heap.block_size)
|
||||
@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
|
||||
getparam->value = device->chipset;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_PCI_VENDOR:
|
||||
getparam->value = dev->pci_vendor;
|
||||
getparam->value = dev->pdev->vendor;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_PCI_DEVICE:
|
||||
getparam->value = dev->pci_device;
|
||||
getparam->value = dev->pdev->device;
|
||||
break;
|
||||
case NOUVEAU_GETPARAM_BUS_TYPE:
|
||||
if (drm_pci_device_is_agp(dev))
|
||||
@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
|
||||
goto done;
|
||||
}
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
|
||||
ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
|
||||
&init->notifier_handle);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
|
||||
#ifdef __powerpc__
|
||||
/* Powerbook specific quirks */
|
||||
if (script == LVDS_RESET &&
|
||||
(dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
|
||||
dev->pci_device == 0x0329))
|
||||
(dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
|
||||
dev->pdev->device == 0x0329))
|
||||
nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
|
||||
#endif
|
||||
|
||||
|
@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
|
||||
struct drm_device *dev = drm->dev;
|
||||
struct nouveau_bo *nvbo = nouveau_bo(bo);
|
||||
|
||||
if (unlikely(nvbo->gem))
|
||||
if (unlikely(nvbo->gem.filp))
|
||||
DRM_ERROR("bo %p still attached to GEM object\n", bo);
|
||||
WARN_ON(nvbo->pin_refcnt > 0);
|
||||
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
|
||||
@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
|
||||
{
|
||||
struct nouveau_bo *nvbo = nouveau_bo(bo);
|
||||
|
||||
return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
|
||||
return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -27,7 +27,10 @@ struct nouveau_bo {
|
||||
u32 tile_flags;
|
||||
struct nouveau_drm_tile *tile;
|
||||
|
||||
struct drm_gem_object *gem;
|
||||
/* Only valid if allocated via nouveau_gem_new() and iff you hold a
|
||||
* gem reference to it! For debugging, use gem.filp != NULL to test
|
||||
* whether it is valid. */
|
||||
struct drm_gem_object gem;
|
||||
|
||||
/* protect by the ttm reservation lock */
|
||||
int pin_refcnt;
|
||||
|
@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
|
||||
connector->doublescan_allowed = true;
|
||||
if (nv_device(drm->device)->card_type == NV_20 ||
|
||||
(nv_device(drm->device)->card_type == NV_10 &&
|
||||
(dev->pci_device & 0x0ff0) != 0x0100 &&
|
||||
(dev->pci_device & 0x0ff0) != 0x0150))
|
||||
(dev->pdev->device & 0x0ff0) != 0x0100 &&
|
||||
(dev->pdev->device & 0x0ff0) != 0x0150))
|
||||
/* HW is broken */
|
||||
connector->interlace_allowed = false;
|
||||
else
|
||||
|
@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
|
||||
if (fb->nvbo)
|
||||
drm_gem_object_unreference_unlocked(fb->nvbo->gem);
|
||||
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
|
||||
|
||||
drm_framebuffer_cleanup(drm_fb);
|
||||
kfree(fb);
|
||||
@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
|
||||
{
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
|
||||
return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
|
||||
return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
|
||||
}
|
||||
|
||||
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
|
||||
@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
|
||||
drm_gem_object_unreference_unlocked(bo->gem);
|
||||
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
|
||||
drm_gem_object_unreference_unlocked(&bo->gem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
|
||||
|
||||
gem = drm_gem_object_lookup(dev, file_priv, handle);
|
||||
if (gem) {
|
||||
struct nouveau_bo *bo = gem->driver_private;
|
||||
struct nouveau_bo *bo = nouveau_gem_object(gem);
|
||||
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
|
||||
drm_gem_object_unreference_unlocked(gem);
|
||||
return 0;
|
||||
|
@ -834,7 +834,6 @@ driver = {
|
||||
.gem_prime_vmap = nouveau_gem_prime_vmap,
|
||||
.gem_prime_vunmap = nouveau_gem_prime_vunmap,
|
||||
|
||||
.gem_init_object = nouveau_gem_object_new,
|
||||
.gem_free_object = nouveau_gem_object_del,
|
||||
.gem_open_object = nouveau_gem_object_open,
|
||||
.gem_close_object = nouveau_gem_object_close,
|
||||
|
@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
|
||||
nouveau_bo_unmap(nouveau_fb->nvbo);
|
||||
nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
|
||||
nouveau_bo_unpin(nouveau_fb->nvbo);
|
||||
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
|
||||
drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
|
||||
nouveau_fb->nvbo = NULL;
|
||||
}
|
||||
drm_fb_helper_fini(&fbcon->helper);
|
||||
|
@ -34,29 +34,20 @@
|
||||
#include "nouveau_ttm.h"
|
||||
#include "nouveau_gem.h"
|
||||
|
||||
int
|
||||
nouveau_gem_object_new(struct drm_gem_object *gem)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_gem_object_del(struct drm_gem_object *gem)
|
||||
{
|
||||
struct nouveau_bo *nvbo = gem->driver_private;
|
||||
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
|
||||
struct ttm_buffer_object *bo = &nvbo->bo;
|
||||
|
||||
if (!nvbo)
|
||||
return;
|
||||
nvbo->gem = NULL;
|
||||
|
||||
if (gem->import_attach)
|
||||
drm_prime_gem_destroy(gem, nvbo->bo.sg);
|
||||
|
||||
ttm_bo_unref(&bo);
|
||||
|
||||
drm_gem_object_release(gem);
|
||||
kfree(gem);
|
||||
|
||||
/* reset filp so nouveau_bo_del_ttm() can test for it */
|
||||
gem->filp = NULL;
|
||||
ttm_bo_unref(&bo);
|
||||
}
|
||||
|
||||
int
|
||||
@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
|
||||
if (nv_device(drm->device)->card_type >= NV_50)
|
||||
nvbo->valid_domains &= domain;
|
||||
|
||||
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
|
||||
if (!nvbo->gem) {
|
||||
/* Initialize the embedded gem-object. We return a single gem-reference
|
||||
* to the caller, instead of a normal nouveau_bo ttm reference. */
|
||||
ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
|
||||
if (ret) {
|
||||
nouveau_bo_ref(NULL, pnvbo);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
|
||||
nvbo->gem->driver_private = nvbo;
|
||||
nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
|
||||
ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
|
||||
if (ret == 0) {
|
||||
ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
|
||||
ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
|
||||
if (ret)
|
||||
drm_gem_handle_delete(file_priv, req->info.handle);
|
||||
}
|
||||
|
||||
/* drop reference from allocate - handle holds it now */
|
||||
drm_gem_object_unreference_unlocked(nvbo->gem);
|
||||
drm_gem_object_unreference_unlocked(&nvbo->gem);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -266,7 +258,7 @@ static int
|
||||
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
|
||||
uint32_t write_domains, uint32_t valid_domains)
|
||||
{
|
||||
struct nouveau_bo *nvbo = gem->driver_private;
|
||||
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
|
||||
struct ttm_buffer_object *bo = &nvbo->bo;
|
||||
uint32_t domains = valid_domains & nvbo->valid_domains &
|
||||
(write_domains ? write_domains : read_domains);
|
||||
@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
|
||||
list_del(&nvbo->entry);
|
||||
nvbo->reserved_by = NULL;
|
||||
ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
|
||||
drm_gem_object_unreference_unlocked(nvbo->gem);
|
||||
drm_gem_object_unreference_unlocked(&nvbo->gem);
|
||||
}
|
||||
}
|
||||
|
||||
@ -376,7 +368,7 @@ retry:
|
||||
validate_fini(op, NULL);
|
||||
return -ENOENT;
|
||||
}
|
||||
nvbo = gem->driver_private;
|
||||
nvbo = nouveau_gem_object(gem);
|
||||
if (nvbo == res_bo) {
|
||||
res_bo = NULL;
|
||||
drm_gem_object_unreference_unlocked(gem);
|
||||
@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
|
||||
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
|
||||
b->write_domains,
|
||||
b->valid_domains);
|
||||
if (unlikely(ret)) {
|
||||
|
@ -12,14 +12,13 @@
|
||||
static inline struct nouveau_bo *
|
||||
nouveau_gem_object(struct drm_gem_object *gem)
|
||||
{
|
||||
return gem ? gem->driver_private : NULL;
|
||||
return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
|
||||
}
|
||||
|
||||
/* nouveau_gem.c */
|
||||
extern int nouveau_gem_new(struct drm_device *, int size, int align,
|
||||
uint32_t domain, uint32_t tile_mode,
|
||||
uint32_t tile_flags, struct nouveau_bo **);
|
||||
extern int nouveau_gem_object_new(struct drm_gem_object *);
|
||||
extern void nouveau_gem_object_del(struct drm_gem_object *);
|
||||
extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
|
||||
extern void nouveau_gem_object_close(struct drm_gem_object *,
|
||||
|
@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
|
||||
return ERR_PTR(ret);
|
||||
|
||||
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
|
||||
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
|
||||
if (!nvbo->gem) {
|
||||
|
||||
/* Initialize the embedded gem-object. We return a single gem-reference
|
||||
* to the caller, instead of a normal nouveau_bo ttm reference. */
|
||||
ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
|
||||
if (ret) {
|
||||
nouveau_bo_ref(NULL, &nvbo);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
nvbo->gem->driver_private = nvbo;
|
||||
return nvbo->gem;
|
||||
return &nvbo->gem;
|
||||
}
|
||||
|
||||
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
|
||||
|
@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
|
||||
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
||||
.gem_prime_export = omap_gem_prime_export,
|
||||
.gem_prime_import = omap_gem_prime_import,
|
||||
.gem_init_object = omap_gem_init_object,
|
||||
.gem_free_object = omap_gem_free_object,
|
||||
.gem_vm_ops = &omap_gem_vm_ops,
|
||||
.dumb_create = omap_gem_dumb_create,
|
||||
|
@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
|
||||
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
|
||||
union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
|
||||
void omap_gem_free_object(struct drm_gem_object *obj);
|
||||
int omap_gem_init_object(struct drm_gem_object *obj);
|
||||
void *omap_gem_vaddr(struct drm_gem_object *obj);
|
||||
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset);
|
||||
|
@ -1274,11 +1274,6 @@ unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int omap_gem_init_object(struct drm_gem_object *obj)
|
||||
{
|
||||
return -EINVAL; /* unused */
|
||||
}
|
||||
|
||||
/* don't call directly.. called from GEM core when it is time to actually
|
||||
* free the object..
|
||||
*/
|
||||
|
@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
dev->irq_enabled = 1;
|
||||
dev->irq_enabled = true;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/* Before installing handler */
|
||||
@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ret;
|
||||
}
|
||||
@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
|
||||
|
||||
if (ret < 0) {
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
dispc_free_irq(dev);
|
||||
}
|
||||
@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev)
|
||||
int omap_drm_irq_uninstall(struct drm_device *dev)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
int irq_enabled, i;
|
||||
bool irq_enabled;
|
||||
int i;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
irq_enabled = dev->irq_enabled;
|
||||
dev->irq_enabled = 0;
|
||||
dev->irq_enabled = false;
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
/*
|
||||
@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
|
||||
if (dev->num_crtcs) {
|
||||
spin_lock_irqsave(&dev->vbl_lock, irqflags);
|
||||
for (i = 0; i < dev->num_crtcs; i++) {
|
||||
DRM_WAKEUP(&dev->vbl_queue[i]);
|
||||
dev->vblank_enabled[i] = 0;
|
||||
dev->last_vblank[i] =
|
||||
DRM_WAKEUP(&dev->vblank[i].queue);
|
||||
dev->vblank[i].enabled = false;
|
||||
dev->vblank[i].last =
|
||||
dev->driver->get_vblank_counter(dev, i);
|
||||
}
|
||||
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
|
||||
|
@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
|
||||
.debugfs_init = qxl_debugfs_init,
|
||||
.debugfs_cleanup = qxl_debugfs_takedown,
|
||||
#endif
|
||||
.gem_init_object = qxl_gem_object_init,
|
||||
.gem_free_object = qxl_gem_object_free,
|
||||
.gem_open_object = qxl_gem_object_open,
|
||||
.gem_close_object = qxl_gem_object_close,
|
||||
|
@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
|
||||
struct qxl_surface *surf,
|
||||
struct qxl_bo **qobj,
|
||||
uint32_t *handle);
|
||||
int qxl_gem_object_init(struct drm_gem_object *obj);
|
||||
void qxl_gem_object_free(struct drm_gem_object *gobj);
|
||||
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
|
||||
void qxl_gem_object_close(struct drm_gem_object *obj,
|
||||
|
@ -28,12 +28,6 @@
|
||||
#include "qxl_drv.h"
|
||||
#include "qxl_object.h"
|
||||
|
||||
int qxl_gem_object_init(struct drm_gem_object *obj)
|
||||
{
|
||||
/* we do nothings here */
|
||||
return 0;
|
||||
}
|
||||
|
||||
void qxl_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
|
||||
|
@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
|
||||
|
||||
/* set the lane count on the sink */
|
||||
tmp = dp_info->dp_lane_count;
|
||||
if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
|
||||
dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
|
||||
if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
|
||||
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
|
||||
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
|
||||
|
||||
|
@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
|
||||
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
|
||||
fp2_gen_cntl = 0;
|
||||
|
||||
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
|
||||
}
|
||||
|
||||
@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
|
||||
(RADEON_CRTC_SYNC_TRISTAT |
|
||||
RADEON_CRTC_DISPLAY_DIS)));
|
||||
|
||||
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
|
||||
}
|
||||
|
||||
@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
|
||||
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
|
||||
}
|
||||
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
|
||||
if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
|
||||
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
|
||||
}
|
||||
return r;
|
||||
|
@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
|
||||
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
|
||||
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
|
||||
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
|
||||
int radeon_gem_object_init(struct drm_gem_object *obj);
|
||||
void radeon_gem_object_free(struct drm_gem_object *obj);
|
||||
int radeon_gem_object_open(struct drm_gem_object *obj,
|
||||
struct drm_file *file_priv);
|
||||
@ -408,7 +407,6 @@ static struct drm_driver kms_driver = {
|
||||
.irq_uninstall = radeon_driver_irq_uninstall_kms,
|
||||
.irq_handler = radeon_driver_irq_handler_kms,
|
||||
.ioctls = radeon_ioctls_kms,
|
||||
.gem_init_object = radeon_gem_object_init,
|
||||
.gem_free_object = radeon_gem_object_free,
|
||||
.gem_open_object = radeon_gem_object_open,
|
||||
.gem_close_object = radeon_gem_object_close,
|
||||
|
@ -29,13 +29,6 @@
|
||||
#include <drm/radeon_drm.h>
|
||||
#include "radeon.h"
|
||||
|
||||
int radeon_gem_object_init(struct drm_gem_object *obj)
|
||||
{
|
||||
BUG();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_gem_object_free(struct drm_gem_object *gobj)
|
||||
{
|
||||
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
|
||||
|
@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
|
||||
switch (info->request) {
|
||||
case RADEON_INFO_DEVICE_ID:
|
||||
*value = dev->pci_device;
|
||||
*value = dev->pdev->device;
|
||||
break;
|
||||
case RADEON_INFO_NUM_GB_PIPES:
|
||||
*value = rdev->num_gb_pipes;
|
||||
|
@ -77,7 +77,6 @@ static struct drm_driver driver = {
|
||||
.unload = udl_driver_unload,
|
||||
|
||||
/* gem hooks */
|
||||
.gem_init_object = udl_gem_init_object,
|
||||
.gem_free_object = udl_gem_free_object,
|
||||
.gem_vm_ops = &udl_gem_vm_ops,
|
||||
|
||||
|
@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
|
||||
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
|
||||
uint32_t handle, uint64_t *offset);
|
||||
|
||||
int udl_gem_init_object(struct drm_gem_object *obj);
|
||||
void udl_gem_free_object(struct drm_gem_object *gem_obj);
|
||||
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
|
||||
size_t size);
|
||||
|
@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
}
|
||||
}
|
||||
|
||||
int udl_gem_init_object(struct drm_gem_object *obj)
|
||||
{
|
||||
BUG();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
|
||||
{
|
||||
struct page **pages;
|
||||
|
@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
|
||||
|
||||
/* Linux specific until context tracking code gets ported to BSD */
|
||||
/* Last context, perform cleanup */
|
||||
if (dev->ctx_count == 1 && dev->dev_private) {
|
||||
if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
|
||||
DRM_DEBUG("Last Context\n");
|
||||
drm_irq_uninstall(dev);
|
||||
via_cleanup_futex(dev_priv);
|
||||
|
@ -264,7 +264,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
|
||||
* core, so we need to set this manually in order to allow the
|
||||
* DRM_IOCTL_WAIT_VBLANK to operate correctly.
|
||||
*/
|
||||
drm->irq_enabled = 1;
|
||||
drm->irq_enabled = true;
|
||||
|
||||
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
|
||||
if (err < 0)
|
||||
|
@ -396,14 +396,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
|
||||
|
||||
/*
|
||||
* enable drm irq mode.
|
||||
* - with irq_enabled = 1, we can use the vblank feature.
|
||||
* - with irq_enabled = true, we can use the vblank feature.
|
||||
*
|
||||
* P.S. note that we wouldn't use drm irq handler but
|
||||
* just specific driver own one instead because
|
||||
* drm framework supports only one irq handler and
|
||||
* drivers can well take care of their interrupts
|
||||
*/
|
||||
drm->irq_enabled = 1;
|
||||
drm->irq_enabled = true;
|
||||
|
||||
drm_mode_config_init(drm);
|
||||
imx_drm_mode_config_init(drm);
|
||||
@ -423,11 +423,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
|
||||
goto err_init;
|
||||
|
||||
/*
|
||||
* with vblank_disable_allowed = 1, vblank interrupt will be disabled
|
||||
* with vblank_disable_allowed = true, vblank interrupt will be disabled
|
||||
* by drm timer once a current process gives up ownership of
|
||||
* vblank event.(after drm_vblank_put function is called)
|
||||
*/
|
||||
imxdrm->drm->vblank_disable_allowed = 1;
|
||||
imxdrm->drm->vblank_disable_allowed = true;
|
||||
|
||||
if (!imx_drm_device_get())
|
||||
ret = -EINVAL;
|
||||
|
@ -670,8 +670,6 @@ struct drm_gem_object {
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	void *driver_private;

	/**
	 * dma_buf - dma buf associated with this GEM object
	 *
@ -925,7 +923,6 @@ struct drm_driver {
	 *
	 * Returns 0 on success.
	 */
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);
	int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
	void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@ -1084,6 +1081,19 @@ struct drm_pending_vblank_event {
	struct drm_event_vblank event;
};

struct drm_vblank_crtc {
	wait_queue_head_t queue; /**< VBLANK wait queue */
	struct timeval time[DRM_VBLANKTIME_RBSIZE]; /**< timestamp of current count */
	atomic_t count; /**< number of VBLANK interrupts */
	atomic_t refcount; /* number of users of vblank interruptsper crtc */
	u32 last; /* protected by dev->vbl_lock, used */
		  /* for wraparound handling */
	u32 last_wait; /* Last vblank seqno waited per CRTC */
	unsigned int inmodeset; /* Display driver is setting mode */
	bool enabled; /* so we don't call enable more than
			 once per disable */
};

/**
 * DRM device structure. This structure represent a complete card that
 * may contain multiple heads.
@ -1108,25 +1118,16 @@ struct drm_device {
	atomic_t buf_alloc; /**< Buffer allocation in progress */
	/*@} */

	/** \name Performance counters */
	/*@{ */
	unsigned long counters;
	enum drm_stat_type types[15];
	atomic_t counts[15];
	/*@} */

	struct list_head filelist;

	/** \name Memory management */
	/*@{ */
	struct list_head maplist; /**< Linked list of regions */
	int map_count; /**< Number of mappable regions */
	struct drm_open_hash map_hash; /**< User token hash table for maps */

	/** \name Context handle management */
	/*@{ */
	struct list_head ctxlist; /**< Linked list of context handles */
	int ctx_count; /**< Number of context handles */
	struct mutex ctxlist_mutex; /**< For ctxlist */

	struct idr ctx_idr;
@ -1142,12 +1143,11 @@ struct drm_device {

	/** \name Context support */
	/*@{ */
	int irq_enabled; /**< True if irq handler is enabled */
	bool irq_enabled; /**< True if irq handler is enabled */
	__volatile__ long context_flag; /**< Context swapping flag */
	int last_context; /**< Last current context */
	/*@} */

	struct work_struct work;
	/** \name VBLANK IRQ support */
	/*@{ */

@ -1157,20 +1157,13 @@ struct drm_device {
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;
	bool vblank_disable_allowed;

	/* array of size num_crtcs */
	struct drm_vblank_crtc *vblank;

	wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
	atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
	struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
	spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
	spinlock_t vbl_lock;
	atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */
	u32 *last_vblank; /* protected by dev->vbl_lock, used */
			  /* for wraparound handling */
	int *vblank_enabled; /* so we don't call enable more than
				once per disable */
	int *vblank_inmodeset; /* Display driver is setting mode */
	u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */
	struct timer_list vblank_disable_timer;

	u32 max_vblank_count; /**< size of vblank counter register */
@ -1187,8 +1180,6 @@ struct drm_device {
|
||||
|
||||
struct device *dev; /**< Device structure */
|
||||
struct pci_dev *pdev; /**< PCI device structure */
|
||||
int pci_vendor; /**< PCI vendor id */
|
||||
int pci_device; /**< PCI device id */
|
||||
#ifdef __alpha__
|
||||
struct pci_controller *hose;
|
||||
#endif
|
||||
@ -1561,8 +1552,6 @@ int drm_gem_init(struct drm_device *dev);
|
||||
void drm_gem_destroy(struct drm_device *dev);
|
||||
void drm_gem_object_release(struct drm_gem_object *obj);
|
||||
void drm_gem_object_free(struct kref *kref);
|
||||
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
|
||||
size_t size);
|
||||
int drm_gem_object_init(struct drm_device *dev,
|
||||
struct drm_gem_object *obj, size_t size);
|
||||
void drm_gem_private_object_init(struct drm_device *dev,
|
||||
@ -1650,9 +1639,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)

#include <drm/drm_mem_util.h>

extern int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver);
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
void drm_dev_free(struct drm_device *dev);
int drm_dev_register(struct drm_device *dev, unsigned long flags);
void drm_dev_unregister(struct drm_device *dev);
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
/*@}*/

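The drm_fill_in_dev() declaration goes away in favour of the drm_dev_alloc()/drm_dev_register() pair declared above. A minimal sketch of the new device lifecycle from a PCI probe/remove path; example_driver and the function names are placeholders and error handling is trimmed:

#include <linux/pci.h>
#include <drm/drmP.h>

static struct drm_driver example_driver;	/* placeholder driver ops */

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	dev = drm_dev_alloc(&example_driver, &pdev->dev);
	if (!dev)
		return -ENOMEM;

	dev->pdev = pdev;

	ret = drm_dev_register(dev, ent->driver_data);
	if (ret) {
		drm_dev_free(dev);
		return ret;
	}

	pci_set_drvdata(pdev, dev);
	return 0;
}

static void example_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unregister(dev);
	drm_dev_free(dev);
}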
@ -973,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
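drm_edid_duplicate() gives callers a way to keep a private copy of a probed EDID. A minimal sketch of a connector mode-probe helper that combines it with the existing declarations above; the cached-copy line is commented out because the field it would write to is a hypothetical driver member:

#include <linux/slab.h>
#include <drm/drm_crtc.h>

/* Sketch: probe EDID, add its modes, optionally keep a private copy. */
static int example_probe_modes(struct drm_connector *connector,
			       struct i2c_adapter *ddc)
{
	struct edid *edid;
	int count;

	edid = drm_get_edid(connector, ddc);
	if (!edid)
		return 0;

	count = drm_add_edid_modes(connector, edid);

	/* hypothetical caching: driver->cached_edid = drm_edid_duplicate(edid); */

	kfree(edid);
	return count;
}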
@ -77,10 +77,10 @@
#define DP_DOWNSTREAMPORT_PRESENT 0x005
# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
/* 00b = DisplayPort */
/* 01b = Analog */
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1)
# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1)
# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1)
# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1)
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */

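The raw bit-pattern comments become named values, so callers can compare against the downstream-port type directly. A minimal sketch that decodes the DP_DOWNSTREAMPORT_PRESENT byte using only the definitions above; the helper name is hypothetical:

#include <drm/drm_dp_helper.h>

/* Sketch: check whether the first downstream facing port is TMDS/HDMI. */
static bool example_dfp_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 port = dpcd[DP_DOWNSTREAMPORT_PRESENT];

	if (!(port & DP_DWN_STRM_PORT_PRESENT))
		return false;

	return (port & DP_DWN_STRM_PORT_TYPE_MASK) == DP_DWN_STRM_PORT_TYPE_TMDS;
}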
@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter);


#define DP_LINK_STATUS_SIZE 6
bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane_count);
u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);

#define DP_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2

void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);

u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
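The link-status helpers now take const buffers, so a read-only cache of the DPCD link status can be passed without casting away qualifiers. A minimal sketch of a combined training check; link_status is assumed to have been read from the sink by driver code, and the helper name is hypothetical:

#include <drm/drm_dp_helper.h>

/* Sketch: the link is trained when both phases report OK. */
static bool example_link_trained(const u8 link_status[DP_LINK_STATUS_SIZE],
				 int lane_count)
{
	return drm_dp_clock_recovery_ok(link_status, lane_count) &&
	       drm_dp_channel_eq_ok(link_status, lane_count);
}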
@ -379,15 +379,22 @@ struct edp_vsc_psr {
#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)

static inline int
drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
}

static inline u8
drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}

static inline bool
drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x11 &&
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
}

#endif /* _DRM_DP_HELPER_H_ */
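With the receiver-capability inlines taking a const DPCD and the new drm_dp_enhanced_frame_cap() replacing open-coded checks, a sink-capability query can be written entirely against a cached, read-only DPCD. A minimal sketch; the function name and the debug printout are illustrative only:

#include <drm/drmP.h>
#include <drm/drm_dp_helper.h>

/* Sketch: summarize sink capabilities from a cached (const) DPCD. */
static void example_print_sink_caps(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int max_rate = drm_dp_max_link_rate(dpcd);
	u8 max_lanes = drm_dp_max_lane_count(dpcd);
	bool enhanced = drm_dp_enhanced_frame_cap(dpcd);

	DRM_DEBUG_KMS("DP sink: max rate %d, max lanes %d, enhanced framing %s\n",
		      max_rate, max_lanes, enhanced ? "yes" : "no");
}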