0b9d37609a
For SR-IOV, the psp ip block has to be initialized before the ih block for the dynamic register programming interface that is needed for the vf ih ring buffer. On the other hand, the current psp ip block hw_init function will initialize the xgmi session, which actually depends on interrupts to return the session context. This results in an empty xgmi ta session id and later failures of all the xgmi ta commands invoked from the vf. xgmi ta session initialization has to be done after the ih ip block hw_init call. To unify xgmi session init/fini for both the bare-metal and SR-IOV virtualization use scenarios, move xgmi ta init to the xgmi_add_device call, and accordingly terminate the xgmi ta session in the xgmi_remove_device call. The existing suspend/resume sequence will not be changed. v2: squash in return fix from Nirmoy Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com> Reviewed-by: Frank Min <Frank.Min@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
59 lines
2.3 KiB
C
59 lines
2.3 KiB
C
/*
|
|
* Copyright 2016 Advanced Micro Devices, Inc.
|
|
*
|
|
* Permission is hereby granted, free of charge, to any person obtaining a
|
|
* copy of this software and associated documentation files (the "Software"),
|
|
* to deal in the Software without restriction, including without limitation
|
|
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
|
* and/or sell copies of the Software, and to permit persons to whom the
|
|
* Software is furnished to do so, subject to the following conditions:
|
|
*
|
|
* The above copyright notice and this permission notice shall be included in
|
|
* all copies or substantial portions of the Software.
|
|
*
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
|
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
|
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
|
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
* OTHER DEALINGS IN THE SOFTWARE.
|
|
*/
|
|
#ifndef __AMDGPU_XGMI_H__
|
|
#define __AMDGPU_XGMI_H__
|
|
|
|
#include <drm/task_barrier.h>
|
|
#include "amdgpu_psp.h"
|
|
|
|
struct amdgpu_hive_info {
|
|
uint64_t hive_id;
|
|
struct list_head device_list;
|
|
int number_devices;
|
|
struct mutex hive_lock, reset_lock;
|
|
struct kobject *kobj;
|
|
struct device_attribute dev_attr;
|
|
struct amdgpu_device *adev;
|
|
int pstate; /*0 -- low , 1 -- high , -1 unknown*/
|
|
struct task_barrier tb;
|
|
};
|
|
|
|
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
|
|
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
|
|
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
|
|
int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
|
|
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
|
|
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
|
|
struct amdgpu_device *peer_adev);
|
|
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
|
|
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
|
|
|
|
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
|
|
struct amdgpu_device *bo_adev)
|
|
{
|
|
return (adev != bo_adev &&
|
|
adev->gmc.xgmi.hive_id &&
|
|
adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
|
|
}
|
|
|
|
#endif
|