amdgpu_fence_process(ring);
seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
- seq_printf(m, "Last signaled fence 0x%08x\n",
+ seq_printf(m, "Last signaled fence 0x%08x\n",
atomic_read(&ring->fence_drv.last_seq));
- seq_printf(m, "Last emitted 0x%08x\n",
+ seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
+ ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
+ seq_printf(m, "Last signaled trailing fence 0x%08x\n",
+ le32_to_cpu(*ring->trail_fence_cpu_addr));
+ seq_printf(m, "Last emitted 0x%08x\n",
+ ring->trail_seq);
+ }
+
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
continue;
/* set in CP_VMID_PREEMPT and preemption occurred */
- seq_printf(m, "Last preempted 0x%08x\n",
+ seq_printf(m, "Last preempted 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
/* set in CP_VMID_RESET and reset occurred */
- seq_printf(m, "Last reset 0x%08x\n",
+ seq_printf(m, "Last reset 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
/* Both preemption and reset occurred */
- seq_printf(m, "Last both 0x%08x\n",
+ seq_printf(m, "Last both 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
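For reference, the two values printed in the new trailing-fence block relate the engine's writeback to the driver's bookkeeping: *ring->trail_fence_cpu_addr holds the last trailing sequence the GFX/SDMA engine wrote back, and ring->trail_seq is the last one emitted. A minimal sketch of how the two could be compared is below; the helper name is made up for illustration and is not part of this patch, and the wraparound-safe comparison mirrors how other 32-bit fence sequence numbers are typically checked.

static inline bool amdgpu_ring_trail_fence_done(struct amdgpu_ring *ring)
{
	/* Last trailing sequence the engine wrote to the writeback slot
	 * (the "Last signaled trailing fence" value in debugfs). */
	u32 signaled = le32_to_cpu(*ring->trail_fence_cpu_addr);

	/* Done once the written-back value has caught up with the last
	 * emitted trailing sequence ("Last emitted"). */
	return (s32)(signaled - ring->trail_seq) >= 0;
}

The hunk below reserves the writeback slot that backs trail_fence_cpu_addr/trail_fence_gpu_addr, alongside the ring's existing writeback allocations.
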
return r;
}
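+ /* Writeback slot for the trailing fence tracked on GFX/SDMA rings:
+  * the engine writes the signaled trailing sequence here, and the
+  * debugfs change above reports it next to trail_seq. */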
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) ring trail_fence_offs wb alloc failed\n", r);
+ return r;
+ }
+ ring->trail_fence_gpu_addr =
+ adev->wb.gpu_addr + (ring->trail_fence_offs * 4);
+ ring->trail_fence_cpu_addr = &adev->wb.wb[ring->trail_fence_offs];
+
r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);