diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index a82c0601a294..d3f4832db289 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -141,12 +141,17 @@ union drm_amdgpu_bo_list {
 
 /* GPU reset status */
 #define AMDGPU_CTX_NO_RESET		0
-#define AMDGPU_CTX_GUILTY_RESET		1 /* this the context caused it */
-#define AMDGPU_CTX_INNOCENT_RESET	2 /* some other context caused it */
-#define AMDGPU_CTX_UNKNOWN_RESET	3 /* unknown cause */
+/* this context caused it */
+#define AMDGPU_CTX_GUILTY_RESET		1
+/* some other context caused it */
+#define AMDGPU_CTX_INNOCENT_RESET	2
+/* unknown cause */
+#define AMDGPU_CTX_UNKNOWN_RESET	3
 
 struct drm_amdgpu_ctx_in {
+	/** AMDGPU_CTX_OP_* */
 	uint32_t	op;
+	/** For future use, no flags defined so far */
 	uint32_t	flags;
 	uint32_t	ctx_id;
 	uint32_t	_pad;
@@ -159,6 +164,7 @@ union drm_amdgpu_ctx_out {
 		} alloc;
 
 		struct {
+			/** For future use, no flags defined so far */
 			uint64_t	flags;
 			/** Number of resets caused by this context so far. */
 			uint32_t	hangs;
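Not part of the patch: an illustrative userspace sketch of the context ioctl these fields feed. It assumes libdrm's drmIoctl(), the AMDGPU_CTX_OP_* ops and the union drm_amdgpu_ctx wrapper defined elsewhere in this header; the `state` member name follows the fields in this union. The later sketches assume the same headers.

/* Sketch only: allocate a context, then ask whether it was reset. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() */
#include "amdgpu_drm.h"

static int example_ctx_reset_status(int fd, uint32_t *reset_status)
{
	union drm_amdgpu_ctx ctx;
	uint32_t ctx_id;
	int r;

	memset(&ctx, 0, sizeof(ctx));
	ctx.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);
	if (r)
		return r;
	ctx_id = ctx.out.alloc.ctx_id;

	memset(&ctx, 0, sizeof(ctx));
	ctx.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	ctx.in.ctx_id = ctx_id;
	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &ctx);
	if (r == 0)
		*reset_status = ctx.out.state.reset_status; /* AMDGPU_CTX_*_RESET */
	return r;
}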
@@ -185,7 +191,9 @@ union drm_amdgpu_ctx {
 struct drm_amdgpu_gem_userptr {
 	uint64_t		addr;
 	uint64_t		size;
+	/** AMDGPU_GEM_USERPTR_* */
 	uint32_t		flags;
+	/** Resulting GEM handle */
 	uint32_t		handle;
 };
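Not part of the patch: a sketch of how drm_amdgpu_gem_userptr might be filled to wrap an existing, page-aligned CPU allocation into a GEM handle. AMDGPU_GEM_USERPTR_READONLY and DRM_IOCTL_AMDGPU_GEM_USERPTR are defined elsewhere in this header; same headers as the sketch above.

static int example_userptr_import(int fd, void *cpu_ptr, uint64_t size,
				  uint32_t *handle)
{
	struct drm_amdgpu_gem_userptr args;
	int r;

	memset(&args, 0, sizeof(args));
	args.addr = (uintptr_t)cpu_ptr;	/* page aligned user address */
	args.size = size;		/* multiple of the page size */
	args.flags = AMDGPU_GEM_USERPTR_READONLY;

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args);
	if (r == 0)
		*handle = args.handle;	/* resulting GEM handle */
	return r;
}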
 
@@ -217,23 +225,29 @@ struct drm_amdgpu_gem_userptr {
 
 /** The same structure is shared for input/output */
 struct drm_amdgpu_gem_metadata {
-	uint32_t	handle;		/* GEM Object handle */
-	uint32_t	op;		/** Do we want get or set metadata */
+	/** GEM Object handle */
+	uint32_t	handle;
+	/** Do we want to get or set metadata */
+	uint32_t	op;
 	struct {
+		/** For future use, no flags defined so far */
 		uint64_t	flags;
-		uint64_t	tiling_info; /* family specific tiling info */
+		/** family specific tiling info */
+		uint64_t	tiling_info;
 		uint32_t	data_size_bytes;
 		uint32_t	data[64];
 	} data;
 };
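Not part of the patch: a sketch of setting per-BO metadata with this structure, assuming the AMDGPU_GEM_METADATA_OP_SET_METADATA op and the DRM_IOCTL_AMDGPU_GEM_METADATA wrapper defined elsewhere in this header (same headers as above).

static int example_set_metadata(int fd, uint32_t handle, uint64_t tiling_info,
				const uint32_t *blob, uint32_t blob_bytes)
{
	struct drm_amdgpu_gem_metadata args;

	if (blob_bytes > sizeof(args.data.data))
		return -1;	/* at most 64 dwords of opaque data */

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.tiling_info = tiling_info;	/* family specific */
	args.data.data_size_bytes = blob_bytes;
	memcpy(args.data.data, blob, blob_bytes);

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_METADATA, &args);
}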
 
 struct drm_amdgpu_gem_mmap_in {
-	uint32_t handle;		/** the GEM object handle */
+	/** the GEM object handle */
+	uint32_t handle;
 	uint32_t _pad;
 };
 
 struct drm_amdgpu_gem_mmap_out {
-	uint64_t addr_ptr;	/** mmap offset from the vma offset manager */
+	/** mmap offset from the vma offset manager */
+	uint64_t addr_ptr;
 };
 
 union drm_amdgpu_gem_mmap {
@@ -242,14 +256,19 @@ union drm_amdgpu_gem_mmap {
 };
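Not part of the patch: a sketch of CPU-mapping a BO through this union; the ioctl returns a fake offset into the DRM file descriptor, which is then handed to mmap(2) (adds <sys/mman.h> to the headers above).

static void *example_cpu_map(int fd, uint32_t handle, uint64_t size)
{
	union drm_amdgpu_gem_mmap args;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args))
		return MAP_FAILED;

	/* addr_ptr is an offset from the vma offset manager, not a pointer */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, args.out.addr_ptr);
}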
 
 struct drm_amdgpu_gem_wait_idle_in {
-	uint32_t handle;   	/* GEM object handle */
+	/** GEM object handle */
+	uint32_t handle;
+	/** For future use, no flags defined so far */
 	uint32_t flags;
-	uint64_t timeout; 	/* Timeout to wait. If 0 then returned immediately with the status */
+	/** Absolute timeout to wait */
+	uint64_t timeout;
 };
 
 struct drm_amdgpu_gem_wait_idle_out {
-	uint32_t status;	/*   BO status:  0 - BO is idle, 1 - BO is busy */
-	uint32_t domain; /*  Returned current memory domain */
+	/** BO status:  0 - BO is idle, 1 - BO is busy */
+	uint32_t status;
+	/** Returned current memory domain */
+	uint32_t domain;
 };
 
 union drm_amdgpu_gem_wait_idle {
@@ -258,7 +277,9 @@ union drm_amdgpu_gem_wait_idle {
 };
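Not part of the patch: a sketch of waiting on a single BO with this union (same headers as above); the timeout units are assumed to be nanoseconds on the kernel's clock, matching the absolute-timeout comment above.

static int example_wait_bo_idle(int fd, uint32_t handle,
				uint64_t abs_timeout, uint32_t *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.timeout = abs_timeout;	/* absolute timeout, assumed ns */

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &args);
	if (r == 0)
		*busy = args.out.status;	/* 0 idle, 1 busy */
	return r;
}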
 
 struct drm_amdgpu_wait_cs_in {
+	/** Command submission handle */
 	uint64_t handle;
+	/** Absolute timeout to wait */
 	uint64_t timeout;
 	uint32_t ip_type;
 	uint32_t ip_instance;
@@ -267,6 +288,7 @@ struct drm_amdgpu_wait_cs_in {
 };
 
 struct drm_amdgpu_wait_cs_out {
+	/** CS status:  0 - CS completed, 1 - CS still busy */
 	uint64_t status;
 };
 
@@ -275,16 +297,19 @@ union drm_amdgpu_wait_cs {
 	struct drm_amdgpu_wait_cs_out out;
 };
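Not part of the patch: a sketch of waiting for a submission with this union, using the handle returned by the CS ioctl further below (same headers as above; AMDGPU_HW_IP_GFX is defined elsewhere in this header).

static int example_wait_cs(int fd, uint64_t cs_handle, uint64_t abs_timeout,
			   uint64_t *busy)
{
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = cs_handle;	/* drm_amdgpu_cs_out.handle */
	args.in.timeout = abs_timeout;	/* absolute timeout, assumed ns */
	args.in.ip_type = AMDGPU_HW_IP_GFX;
	args.in.ip_instance = 0;	/* first instance, ring 0 via memset */

	r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r == 0)
		*busy = args.out.status;	/* 0 completed, 1 still busy */
	return r;
}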
 
-/* Sets or returns a value associated with a buffer. */
-struct drm_amdgpu_gem_op {
-	uint32_t	handle; /* buffer */
-	uint32_t	op;     /* AMDGPU_GEM_OP_* */
-	uint64_t	value;  /* input or return value */
-};
-
 #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO	0
 #define AMDGPU_GEM_OP_SET_PLACEMENT		1
 
+/* Sets or returns a value associated with a buffer. */
+struct drm_amdgpu_gem_op {
+	/** GEM object handle */
+	uint32_t	handle;
+	/** AMDGPU_GEM_OP_* */
+	uint32_t	op;
+	/** Input or return value */
+	uint64_t	value;
+};
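Not part of the patch: a sketch of AMDGPU_GEM_OP_SET_PLACEMENT through this structure; treating `value` as a preferred-domain mask (e.g. AMDGPU_GEM_DOMAIN_VRAM, defined elsewhere in this header) is an assumption here, same headers as above.

static int example_set_placement(int fd, uint32_t handle, uint64_t domains)
{
	struct drm_amdgpu_gem_op args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.op = AMDGPU_GEM_OP_SET_PLACEMENT;
	args.value = domains;	/* e.g. AMDGPU_GEM_DOMAIN_VRAM (assumed) */

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_OP, &args);
}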
+
 #define AMDGPU_VA_OP_MAP			1
 #define AMDGPU_VA_OP_UNMAP			2
 
@@ -297,19 +322,18 @@ struct drm_amdgpu_gem_op {
 #define AMDGPU_VM_PAGE_EXECUTABLE	(1 << 3)
 
 struct drm_amdgpu_gem_va {
-	/* GEM object handle */
+	/** GEM object handle */
 	uint32_t handle;
 	uint32_t _pad;
-	/* map or unmap*/
+	/** AMDGPU_VA_OP_* */
 	uint32_t operation;
-	/* specify mapping flags */
+	/** AMDGPU_VM_PAGE_* */
 	uint32_t flags;
-	/* va address to assign . Must be correctly aligned.*/
+	/** va address to assign. Must be correctly aligned. */
 	uint64_t va_address;
-	/* Specify offset inside of BO to assign. Must be correctly aligned.*/
+	/** Specify offset inside of BO to assign. Must be correctly aligned. */
 	uint64_t offset_in_bo;
-	/* Specify mapping size. If 0 and offset is 0 then map the whole BO.*/
-	/* Must be correctly aligned. */
+	/** Specify mapping size. Must be correctly aligned. */
 	uint64_t map_size;
 };
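Not part of the patch: a sketch of mapping a BO into the GPU virtual address space with this structure and the AMDGPU_VA_OP_* / AMDGPU_VM_PAGE_* flags above (same headers as above; the DRM_IOCTL_AMDGPU_GEM_VA wrapper is defined elsewhere in this header).

static int example_map_bo_va(int fd, uint32_t handle, uint64_t gpu_va,
			     uint64_t size)
{
	struct drm_amdgpu_gem_va args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = gpu_va;	/* correctly aligned VA */
	args.offset_in_bo = 0;
	args.map_size = size;		/* correctly aligned size */

	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
}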
 
@@ -324,6 +348,7 @@ struct drm_amdgpu_gem_va {
 
 #define AMDGPU_CHUNK_ID_IB		0x01
 #define AMDGPU_CHUNK_ID_FENCE		0x02
+
 struct drm_amdgpu_cs_chunk {
 	uint32_t		chunk_id;
 	uint32_t		length_dw;
@@ -337,7 +362,7 @@ struct drm_amdgpu_cs_in {
 	uint32_t		bo_list_handle;
 	uint32_t		num_chunks;
 	uint32_t		_pad;
-	/* this points to uint64_t * which point to cs chunks */
+	/** this points to an array of uint64_t, each pointing to a cs chunk */
 	uint64_t		chunks;
 };
 
@@ -346,8 +371,8 @@ struct drm_amdgpu_cs_out {
 };
 
 union drm_amdgpu_cs {
-       struct drm_amdgpu_cs_in in;
-       struct drm_amdgpu_cs_out out;
+	struct drm_amdgpu_cs_in in;
+	struct drm_amdgpu_cs_out out;
 };
 
 /* Specify flags to be used for IB */
@@ -360,12 +385,18 @@ union drm_amdgpu_cs {
 
 struct drm_amdgpu_cs_chunk_ib {
 	uint32_t _pad;
-	uint32_t flags;		/* IB Flags */
-	uint64_t va_start;	/* Virtual address to begin IB execution */
-	uint32_t ib_bytes;	/* Size of submission */
-	uint32_t ip_type;	/* HW IP to submit to */
-	uint32_t ip_instance;	/* HW IP index of the same type to submit to  */
-	uint32_t ring;		/* Ring index to submit to */
+	/** AMDGPU_IB_FLAG_* */
+	uint32_t flags;
+	/** Virtual address to begin IB execution */
+	uint64_t va_start;
+	/** Size of submission */
+	uint32_t ib_bytes;
+	/** HW IP to submit to */
+	uint32_t ip_type;
+	/** HW IP index of the same type to submit to */
+	uint32_t ip_instance;
+	/** Ring index to submit to */
+	uint32_t ring;
 };
 
 struct drm_amdgpu_cs_chunk_fence {
@@ -460,23 +491,28 @@ struct drm_amdgpu_info {
 			/** AMDGPU_HW_IP_* */
 			uint32_t type;
 			/**
-			 * Index of the IP if there are more IPs of the same type.
-			 * Ignored by AMDGPU_INFO_HW_IP_COUNT.
+			 * Index of the IP if there are more IPs of the same
+			 * type. Ignored by AMDGPU_INFO_HW_IP_COUNT.
 			 */
 			uint32_t ip_instance;
 		} query_hw_ip;
 
 		struct {
 			uint32_t dword_offset;
-			uint32_t count; /* number of registers to read */
+			/** number of registers to read */
+			uint32_t count;
 			uint32_t instance;
+			/** For future use, no flags defined so far */
 			uint32_t flags;
 		} read_mmr_reg;
 
 		struct {
 			/** AMDGPU_INFO_FW_* */
 			uint32_t fw_type;
-			/** Index of the IP if there are more IPs of the same type. */
+			/**
+			 * Index of the IP if there are more IPs of
+			 * the same type.
+			 */
 			uint32_t ip_instance;
 			/**
 			 * Index of the engine. Whether this is used depends
@@ -537,9 +573,10 @@ struct drm_amdgpu_info_device {
 	uint32_t family;
 	uint32_t num_shader_engines;
 	uint32_t num_shader_arrays_per_engine;
-	uint32_t gpu_counter_freq; /* in KHz */
-	uint64_t max_engine_clock; /* in KHz */
-	uint64_t max_memory_clock; /* in KHz */
+	/** gpu counter, max engine and max memory clocks in KHz */
+	uint32_t gpu_counter_freq;
+	uint64_t max_engine_clock;
+	uint64_t max_memory_clock;
 	/* cu information */
 	uint32_t cu_active_number;
 	uint32_t cu_ao_mask;
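Not part of the patch: a sketch of fetching this device-info block through DRM_IOCTL_AMDGPU_INFO; the return_pointer/return_size/query fields and the AMDGPU_INFO_DEV_INFO query id are not visible in this hunk and are assumed from the rest of the header (same headers as above).

static int example_query_device(int fd, struct drm_amdgpu_info_device *dev)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)dev;	/* assumed field names */
	request.return_size = sizeof(*dev);
	request.query = AMDGPU_INFO_DEV_INFO;		/* assumed query id */

	/* On success, gpu_counter_freq/max_*_clock above come back in KHz. */
	return drmIoctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
}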