Merge branch 'pci/devres'
- Add pcim_add_mapping_to_legacy_table() and
  pcim_remove_mapping_from_legacy_table() helper functions to simplify the
  devres iomap table management (Philipp Stanner)

- Reimplement the devres functions that take a bit mask of BARs in a way
  that can be used to map partial BARs as well as entire BARs (Philipp
  Stanner)

- Deprecate pcim_iomap_table() and pcim_iomap_regions_request_all() in
  favor of pcim_* request plus pcim_* mapping (Philipp Stanner)

- Add pcim_request_region(), a managed interface to request a single BAR
  (Philipp Stanner)

- Use the existing pci_is_enabled() interface to replace the struct
  pci_devres.enabled bit (Philipp Stanner)

- Move the struct pci_devres.pinned bit to struct pci_dev (Philipp Stanner)

- Reimplement pcim_set_mwi() so it uses its own devres cleanup callback
  instead of a special-purpose bit in struct pci_devres (Philipp Stanner)

- Add pcim_intx(), which is unambiguously managed, unlike pci_intx(), which
  is managed if pcim_enable_device() has been called but unmanaged otherwise
  (Philipp Stanner)

- Remove pcim_release(), which is no longer needed after the previous
  cleanups of pcim_set_mwi() and pci_intx() (Philipp Stanner)

- Add pcim_iomap_range(), a managed interface to map part of a BAR (Philipp
  Stanner)

- Fix a vboxvideo leak by using the new pcim_iomap_range() instead of the
  unmanaged pci_iomap_range() (Philipp Stanner)

* pci/devres:
  drm/vboxvideo: fix mapping leaks
  PCI: Add managed pcim_iomap_range()
  PCI: Remove legacy pcim_release()
  PCI: Add managed pcim_intx()
  PCI: Give pcim_set_mwi() its own devres cleanup callback
  PCI: Move struct pci_devres.pinned bit to struct pci_dev
  PCI: Remove struct pci_devres.enabled status bit
  PCI: Document hybrid devres hazards
  PCI: Add managed pcim_request_region()
  PCI: Deprecate pcim_iomap_table(), pcim_iomap_regions_request_all()
  PCI: Add managed partial-BAR request and map infrastructure
  PCI: Add devres helpers for iomap table
  PCI: Add and use devres helper for bit masks
commit 06bbe25c21
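For orientation, below is a minimal sketch of the fully managed request-and-map flow this series moves drivers towards. It is not taken from the diff that follows; the demo_* names, BAR number and mapping size are made up for illustration.

#include <linux/pci.h>
#include <linux/sizes.h>

/* Illustrative probe routine using only managed (pcim_*) interfaces. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);	/* undone automatically on driver unbind */
	if (ret)
		return ret;

	/* Map the first 4 KiB of BAR 0; devres unmaps it when the driver detaches. */
	regs = pcim_iomap_range(pdev, 0, 0, SZ_4K);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the device through "regs" ... */
	return 0;
}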
@@ -42,12 +42,11 @@ static int vbox_accel_init(struct vbox_private *vbox)
 	/* Take a command buffer for each screen from the end of usable VRAM. */
 	vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
 
-	vbox->vbva_buffers = pci_iomap_range(pdev, 0,
-					     vbox->available_vram_size,
-					     vbox->num_crtcs *
-					     VBVA_MIN_BUFFER_SIZE);
-	if (!vbox->vbva_buffers)
-		return -ENOMEM;
+	vbox->vbva_buffers = pcim_iomap_range(
+			pdev, 0, vbox->available_vram_size,
+			vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE);
+	if (IS_ERR(vbox->vbva_buffers))
+		return PTR_ERR(vbox->vbva_buffers);
 
 	for (i = 0; i < vbox->num_crtcs; ++i) {
 		vbva_setup_buffer_context(&vbox->vbva_info[i],
@@ -116,11 +115,10 @@ int vbox_hw_init(struct vbox_private *vbox)
 	DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
 
 	/* Map guest-heap at end of vram */
-	vbox->guest_heap =
-		pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox),
-				GUEST_HEAP_SIZE);
-	if (!vbox->guest_heap)
-		return -ENOMEM;
+	vbox->guest_heap = pcim_iomap_range(pdev, 0,
+			GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE);
+	if (IS_ERR(vbox->guest_heap))
+		return PTR_ERR(vbox->guest_heap);
 
 	/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
 	vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
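Note the changed error convention above: the unmanaged pci_iomap_range() returns NULL on failure, while the managed pcim_iomap_range() returns an ERR_PTR() and is unmapped automatically on unbind. A hedged sketch of the two calling patterns (illustrative only, not vboxvideo code):

#include <linux/pci.h>

static int demo_map(struct pci_dev *pdev, unsigned long off, unsigned long len)
{
	void __iomem *map;

	/* Old, unmanaged: NULL on failure, caller must pci_iounmap() eventually. */
	map = pci_iomap_range(pdev, 0, off, len);
	if (!map)
		return -ENOMEM;
	pci_iounmap(pdev, map);

	/* New, managed: ERR_PTR() on failure, devres handles the unmap. */
	map = pcim_iomap_range(pdev, 0, off, len);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}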
File diff suppressed because it is too large
@@ -23,6 +23,10 @@
  *
  * @maxlen specifies the maximum length to map. If you want to get access to
  * the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
  * */
 void __iomem *pci_iomap_range(struct pci_dev *dev,
 			      int bar,
@@ -63,6 +67,10 @@ EXPORT_SYMBOL(pci_iomap_range);
  *
  * @maxlen specifies the maximum length to map. If you want to get access to
  * the complete BAR from offset to the end, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
  * */
 void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
 				 int bar,
@@ -106,6 +114,10 @@ EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
  *
  * @maxlen specifies the maximum length to map. If you want to get access to
  * the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device(). If you need automatic cleanup, use pcim_iomap().
  * */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
@@ -127,6 +139,10 @@ EXPORT_SYMBOL(pci_iomap);
  *
  * @maxlen specifies the maximum length to map. If you want to get access to
  * the complete BAR without checking for its length first, pass %0 here.
+ *
+ * NOTE:
+ * This function is never managed, even if you initialized with
+ * pcim_enable_device().
  * */
 void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
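The NOTEs added above stress that the pci_iomap_*() family never participates in devres, even after pcim_enable_device(); cleanup stays with the caller. A minimal sketch of that manual teardown, with illustrative names:

#include <linux/io.h>
#include <linux/pci.h>

static int demo_peek_bar(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_iomap(pdev, 0, 0);	/* maxlen == 0: map the whole BAR */
	if (!regs)
		return -ENOMEM;

	(void)ioread32(regs);		/* ... use the mapping ... */

	pci_iounmap(pdev, regs);	/* never done for you, managed device or not */
	return 0;
}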
@@ -2247,12 +2247,6 @@ void pci_disable_enabled_device(struct pci_dev *dev)
  */
 void pci_disable_device(struct pci_dev *dev)
 {
-	struct pci_devres *dr;
-
-	dr = find_pci_dr(dev);
-	if (dr)
-		dr->enabled = 0;
-
 	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
 		      "disabling already-disabled device");
 
@@ -3901,7 +3895,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
  */
 void pci_release_region(struct pci_dev *pdev, int bar)
 {
-	struct pci_devres *dr;
+	/*
+	 * This is done for backwards compatibility, because the old PCI devres
+	 * API had a mode in which the function became managed if it had been
+	 * enabled with pcim_enable_device() instead of pci_enable_device().
+	 */
+	if (pci_is_managed(pdev)) {
+		pcim_release_region(pdev, bar);
+		return;
+	}
 
 	if (pci_resource_len(pdev, bar) == 0)
 		return;
@@ -3911,10 +3913,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
 		release_mem_region(pci_resource_start(pdev, bar),
 				   pci_resource_len(pdev, bar));
-
-	dr = find_pci_dr(pdev);
-	if (dr)
-		dr->region_mask &= ~(1 << bar);
 }
 EXPORT_SYMBOL(pci_release_region);
 
@@ -3925,6 +3923,8 @@ EXPORT_SYMBOL(pci_release_region);
  * @res_name: Name to be associated with resource.
  * @exclusive: whether the region access is exclusive or not
  *
+ * Returns: 0 on success, negative error code on failure.
+ *
  * Mark the PCI region associated with PCI device @pdev BAR @bar as
  * being reserved by owner @res_name. Do not access any
  * address inside the PCI regions unless this call returns
@@ -3940,7 +3940,12 @@
 static int __pci_request_region(struct pci_dev *pdev, int bar,
 				const char *res_name, int exclusive)
 {
-	struct pci_devres *dr;
+	if (pci_is_managed(pdev)) {
+		if (exclusive == IORESOURCE_EXCLUSIVE)
+			return pcim_request_region_exclusive(pdev, bar, res_name);
+
+		return pcim_request_region(pdev, bar, res_name);
+	}
 
 	if (pci_resource_len(pdev, bar) == 0)
 		return 0;
@@ -3956,10 +3961,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
 		goto err_out;
 	}
 
-	dr = find_pci_dr(pdev);
-	if (dr)
-		dr->region_mask |= 1 << bar;
-
 	return 0;
 
 err_out:
@@ -3974,6 +3975,8 @@ err_out:
  * @bar: BAR to be reserved
  * @res_name: Name to be associated with resource
  *
+ * Returns: 0 on success, negative error code on failure.
+ *
  * Mark the PCI region associated with PCI device @pdev BAR @bar as
  * being reserved by owner @res_name. Do not access any
  * address inside the PCI regions unless this call returns
@@ -3981,6 +3984,11 @@ err_out:
  *
  * Returns 0 on success, or %EBUSY on error. A warning
  * message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
  */
 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
 {
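The deprecated hybrid behavior documented above means the cleanup semantics of pci_request_region() silently depend on how the device was enabled. A hedged sketch of the unambiguous alternative; the driver name and BAR mask are illustrative, and pcim_iomap_regions() is used here because it requests and maps through devres in one step:

#include <linux/bits.h>
#include <linux/pci.h>

static int demo_request(struct pci_dev *pdev)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * Hybrid (deprecated): pci_request_region(pdev, 0, "demo") would now
	 * silently become managed because of the pcim_enable_device() above.
	 *
	 * Explicitly managed instead: request and map BAR 0 through devres.
	 */
	return pcim_iomap_regions(pdev, BIT(0), "demo");
}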
@@ -4031,6 +4039,13 @@
  * @pdev: PCI device whose resources are to be reserved
  * @bars: Bitmask of BARs to be requested
  * @res_name: Name to be associated with resource
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
  */
 int pci_request_selected_regions(struct pci_dev *pdev, int bars,
 				 const char *res_name)
@@ -4039,6 +4054,19 @@ int pci_request_selected_regions(struct pci_dev *pdev, int bars,
 }
 EXPORT_SYMBOL(pci_request_selected_regions);
 
+/**
+ * pci_request_selected_regions_exclusive - Request regions exclusively
+ * @pdev: PCI device to request regions from
+ * @bars: bit mask of BARs to request
+ * @res_name: name to be associated with the requests
+ *
+ * Returns: 0 on success, negative error code on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
+ */
 int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
 					   const char *res_name)
 {
@@ -4056,7 +4084,6 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
  * successful call to pci_request_regions(). Call this function only
  * after all use of the PCI regions has ceased.
  */
-
 void pci_release_regions(struct pci_dev *pdev)
 {
 	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
@@ -4075,6 +4102,11 @@ EXPORT_SYMBOL(pci_release_regions);
  *
  * Returns 0 on success, or %EBUSY on error. A warning
  * message is also printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
  */
 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
 {
@@ -4088,6 +4120,8 @@ EXPORT_SYMBOL(pci_request_regions);
  * @pdev: PCI device whose resources are to be reserved
  * @res_name: Name to be associated with resource.
  *
+ * Returns: 0 on success, negative error code on failure.
+ *
  * Mark all PCI regions associated with PCI device @pdev as being reserved
  * by owner @res_name. Do not access any address inside the PCI regions
  * unless this call returns successfully.
@@ -4097,6 +4131,11 @@ EXPORT_SYMBOL(pci_request_regions);
  *
  * Returns 0 on success, or %EBUSY on error. A warning message is also
  * printed on failure.
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use the pcim_* functions instead.
  */
 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
 {
@@ -4428,11 +4467,22 @@ void pci_disable_parity(struct pci_dev *dev)
  * @enable: boolean: whether to enable or disable PCI INTx
  *
  * Enables/disables PCI INTx for device @pdev
+ *
+ * NOTE:
+ * This is a "hybrid" function: It's normally unmanaged, but becomes managed
+ * when pcim_enable_device() has been called in advance. This hybrid feature is
+ * DEPRECATED! If you want managed cleanup, use pcim_intx() instead.
  */
 void pci_intx(struct pci_dev *pdev, int enable)
 {
 	u16 pci_command, new;
 
+	/* Preserve the "hybrid" behavior for backwards compatibility */
+	if (pci_is_managed(pdev)) {
+		WARN_ON_ONCE(pcim_intx(pdev, enable) != 0);
+		return;
+	}
+
 	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
 
 	if (enable)
@@ -4440,17 +4490,8 @@ void pci_intx(struct pci_dev *pdev, int enable)
 	else
 		new = pci_command | PCI_COMMAND_INTX_DISABLE;
 
-	if (new != pci_command) {
-		struct pci_devres *dr;
-
+	if (new != pci_command)
 		pci_write_config_word(pdev, PCI_COMMAND, new);
-
-		dr = find_pci_dr(pdev);
-		if (dr && !dr->restore_intx) {
-			dr->restore_intx = 1;
-			dr->orig_intx = !enable;
-		}
-	}
 }
 EXPORT_SYMBOL_GPL(pci_intx);
 
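pcim_intx(), referenced in the NOTE above, is the unambiguously managed variant; per the header change below, it is declared in the PCI core's internal pci.h at this point, so the caller in this sketch is assumed to live inside the PCI core (names illustrative):

static int demo_enable_intx(struct pci_dev *pdev)
{
	int ret;

	/* Enable INTx; devres restores the original setting on unbind. */
	ret = pcim_intx(pdev, 1);
	if (ret)
		return ret;

	return 0;
}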
@@ -810,26 +810,12 @@ static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
 }
 #endif
 
-/*
- * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X
- * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so
- * there's no need to track it separately. pci_devres is initialized
- * when a device is enabled using managed PCI device enable interface.
- *
- * TODO: Struct pci_devres and find_pci_dr() only need to be here because
- * they're used in pci.c. Port or move these functions to devres.c and
- * then remove them from here.
- */
-struct pci_devres {
-	unsigned int enabled:1;
-	unsigned int pinned:1;
-	unsigned int orig_intx:1;
-	unsigned int restore_intx:1;
-	unsigned int mwi:1;
-	u32 region_mask;
-};
+int pcim_intx(struct pci_dev *dev, int enable);
 
-struct pci_devres *find_pci_dr(struct pci_dev *pdev);
+int pcim_request_region(struct pci_dev *pdev, int bar, const char *name);
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar,
+				  const char *name);
+void pcim_release_region(struct pci_dev *pdev, int bar);
 
 /*
  * Config Address for PCI Configuration Mechanism #1
@@ -367,10 +367,11 @@ struct pci_dev {
 					   this is D0-D3, D0 being fully
 					   functional, and D3 being off. */
 	u8		pm_cap;		/* PM capability offset */
-	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
 	unsigned int	pme_support:5;	/* Bitmask of states from which PME#
 					   can be generated */
 	unsigned int	pme_poll:1;	/* Poll device's PME status bit */
+	unsigned int	pinned:1;	/* Whether this dev is pinned */
+	unsigned int	imm_ready:1;	/* Supports Immediate Readiness */
 	unsigned int	d1_support:1;	/* Low power state D1 is supported */
 	unsigned int	d2_support:1;	/* Low power state D2 is supported */
 	unsigned int	no_d1d2:1;	/* D1 and D2 are forbidden */
@@ -2302,6 +2303,8 @@ int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name);
 int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 				   const char *name);
 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
+void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar,
+			       unsigned long offset, unsigned long len);
 
 extern int pci_pci_problems;
 #define PCIPCI_FAIL		1	/* No PCI PCI DMA */
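For contrast with the deprecated table-based helpers declared above, a hedged sketch of the old and new mapping patterns; the BAR number, length and name are illustrative:

#include <linux/bits.h>
#include <linux/pci.h>

/*
 * Deprecated pattern the series moves away from: request all BARs,
 * then look the mapping up in the iomap table.
 */
static void __iomem *demo_old_style(struct pci_dev *pdev)
{
	if (pcim_iomap_regions_request_all(pdev, BIT(0), "demo"))
		return NULL;

	return pcim_iomap_table(pdev)[0];
}

/* Preferred pattern: map only the range that is actually needed. */
static void __iomem *demo_new_style(struct pci_dev *pdev)
{
	return pcim_iomap_range(pdev, 0, 0, 0x1000);	/* ERR_PTR() on failure */
}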