drm/i915: Serialize against vma moves
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 11:20:33 +0000 (12:20 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 19 Aug 2019 14:25:56 +0000 (15:25 +0100)
Make sure that when submitting requests, we always serialize against
potential vma moves and clflushes.

Time for an i915_request_await_vma() interface!
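
The open-coded pattern repeated in the hunks below (take the vma lock,
await the backing object, then mark the vma active) is what such a
helper could fold into a single call. A minimal sketch only, assuming
the i915 API used elsewhere in this patch; the helper itself is not
added here:

	/*
	 * Hypothetical helper, not part of this patch: serialize the
	 * request against any outstanding moves/clflushes on the vma's
	 * backing object before tracking the vma as active.
	 */
	static int i915_request_await_vma(struct i915_request *rq,
					  struct i915_vma *vma,
					  unsigned int flags)
	{
		int err;

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj,
						flags & EXEC_OBJECT_WRITE);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, flags);
		i915_vma_unlock(vma);

		return err;
	}

Each call site below would then reduce to a single
i915_request_await_vma(rq, vma, flags) call.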

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190819112033.30638-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/intel_renderstate.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/selftests/i915_request.c

index 77a201b..577bd7c 100644
@@ -1192,8 +1192,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto skip_request;
 
        i915_vma_lock(batch);
-       GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
index 784585a..6415f9a 100644
@@ -106,7 +106,9 @@ int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
        int err;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, 0);
        i915_vma_unlock(vma);
        if (unlikely(err))
                return err;
@@ -171,7 +173,9 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
        }
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (unlikely(err))
                goto out_request;
index a1a4b53..0ff7a89 100644
@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        intel_ring_advance(rq, cs);
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        i915_vma_unpin(vma);
 
index dd87e6c..3e6f4a6 100644
@@ -666,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
                goto err_request;
 
        i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
@@ -1218,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
                goto err_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, 0);
+       err = i915_request_await_object(rq, vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, 0);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
@@ -1315,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
                goto err_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
index 50aa7e9..1d27bab 100644
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
                }
 
                i915_vma_lock(vma);
-               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+               err = i915_request_await_object(rq, vma->obj, true);
+               if (err == 0)
+                       err = i915_vma_move_to_active(vma, rq,
+                                                     EXEC_OBJECT_WRITE);
                i915_vma_unlock(vma);
 
                i915_request_add(rq);
index 42e1e9c..57ece53 100644
@@ -139,13 +139,17 @@ int igt_gpu_fill_dw(struct i915_vma *vma,
                goto err_request;
 
        i915_vma_lock(batch);
-       err = i915_vma_move_to_active(batch, rq, 0);
+       err = i915_request_await_object(rq, batch->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto skip_request;
index be37d45..6d05f9c 100644
@@ -222,7 +222,9 @@ int intel_renderstate_emit(struct i915_request *rq)
        }
 
        i915_vma_lock(so.vma);
-       err = i915_vma_move_to_active(so.vma, rq, 0);
+       err = i915_request_await_object(rq, so.vma->obj, false);
+       if (err == 0)
+               err = i915_vma_move_to_active(so.vma, rq, 0);
        i915_vma_unlock(so.vma);
 err_unpin:
        i915_vma_unpin(so.vma);
index 4484b44..298c4d1 100644
@@ -118,7 +118,10 @@ static int move_to_active(struct i915_vma *vma,
        int err;
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, flags);
+       err = i915_request_await_object(rq, vma->obj,
+                                       flags & EXEC_OBJECT_WRITE);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, flags);
        i915_vma_unlock(vma);
 
        return err;
@@ -1237,7 +1240,10 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        }
 
        i915_vma_lock(arg.vma);
-       err = i915_vma_move_to_active(arg.vma, rq, flags);
+       err = i915_request_await_object(rq, arg.vma->obj,
+                                       flags & EXEC_OBJECT_WRITE);
+       if (err == 0)
+               err = i915_vma_move_to_active(arg.vma, rq, flags);
        i915_vma_unlock(arg.vma);
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
index b797be1..d791158 100644
@@ -1459,11 +1459,13 @@ static int smoke_submit(struct preempt_smoke *smoke,
 
        if (vma) {
                i915_vma_lock(vma);
-               err = rq->engine->emit_bb_start(rq,
-                                               vma->node.start,
-                                               PAGE_SIZE, 0);
+               err = i915_request_await_object(rq, vma->obj, false);
                if (!err)
                        err = i915_vma_move_to_active(vma, rq, 0);
+               if (!err)
+                       err = rq->engine->emit_bb_start(rq,
+                                                       vma->node.start,
+                                                       PAGE_SIZE, 0);
                i915_vma_unlock(vma);
        }
 
index eb5da01..d06d68a 100644
@@ -113,7 +113,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        }
 
        i915_vma_lock(vma);
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_request_await_object(rq, vma->obj, true);
+       if (err == 0)
+               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unlock(vma);
        if (err)
                goto err_req;
index 3937d43..170e4af 100644
@@ -876,7 +876,9 @@ static int live_all_engines(void *arg)
                request[id]->batch = batch;
 
                i915_vma_lock(batch);
-               err = i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_request_await_object(request[id], batch->obj, 0);
+               if (err == 0)
+                       err = i915_vma_move_to_active(batch, request[id], 0);
                i915_vma_unlock(batch);
                GEM_BUG_ON(err);