drm/vmwgfx: Replace vmw_dma_buffer with vmw_buffer_object
author Thomas Hellstrom <thellstrom@vmware.com>
Tue, 19 Jun 2018 13:02:16 +0000 (15:02 +0200)
committer Thomas Hellstrom <thellstrom@vmware.com>
Tue, 3 Jul 2018 18:33:30 +0000 (20:33 +0200)
Initially, VMware buffer objects were used only as DMA buffers, so the name
DMA buffer was a natural one. However, they are now also used as dumb
buffers and as MOBs backing guest-backed objects, so renaming them to
buffer objects is logical, particularly since there is a dma-buf subsystem
in the kernel where a DMA buffer means something completely different.

This also renames the user-space API structures and IOCTL names
correspondingly; the old names remain defined for now, so the ABI
is unchanged.
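
For illustration, the uapi compatibility pattern is roughly the following
sketch (a hypothetical excerpt, assuming the renamed request is called
DRM_VMW_ALLOC_BO; the real hunk lives in include/uapi/drm/vmwgfx_drm.h):

	/* Keep the old uapi name as an alias of the renamed request, so
	 * existing user-space keeps compiling against unchanged ioctl
	 * numbers.
	 */
	#define DRM_VMW_ALLOC_DMABUF	DRM_VMW_ALLOC_BO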

There are a couple of minor style changes to make checkpatch happy.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
22 files changed:
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c [deleted file]
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c [deleted file]
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c [new file with mode: 0644]
include/uapi/drm/vmwgfx_drm.h

diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 794cc9d..09b2aa0 100644
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
-           vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
+           vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-           vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
+           vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
            vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
            vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
            vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
new file mode 100644
index 0000000..f26f658
--- /dev/null
@@ -0,0 +1,376 @@
+/**************************************************************************
+ *
+ * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include <drm/ttm/ttm_placement.h>
+
+#include <drm/drmP.h>
+#include "vmwgfx_drv.h"
+
+
+/**
+ * vmw_bo_pin_in_placement - Validate a buffer to a given placement and pin it.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  Buffer object to move.
+ * @placement:  The placement to pin it in.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ *  -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+                           struct vmw_buffer_object *buf,
+                           struct ttm_placement *placement,
+                           bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, &ctx);
+
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+
+err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+/**
+ * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  Buffer object to move.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+                             struct vmw_buffer_object *buf,
+                             bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+       uint32_t new_flags;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+
+       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+       if (likely(ret == 0) || ret == -ERESTARTSYS)
+               goto out_unreserve;
+
+       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+
+out_unreserve:
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+err:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+/**
+ * vmw_bo_pin_in_vram - Move a buffer to vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  Buffer object to move.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+                      struct vmw_buffer_object *buf,
+                      bool interruptible)
+{
+       return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
+                                      interruptible);
+}
+
+/**
+ * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
+ *
+ * This function takes the reservation_sem in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  Buffer object to pin.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
+                               struct vmw_buffer_object *buf,
+                               bool interruptible)
+{
+       struct ttm_operation_ctx ctx = { interruptible, false };
+       struct ttm_buffer_object *bo = &buf->base;
+       struct ttm_placement placement;
+       struct ttm_place place;
+       int ret = 0;
+       uint32_t new_flags;
+
+       place = vmw_vram_placement.placement[0];
+       place.lpfn = bo->num_pages;
+       placement.num_placement = 1;
+       placement.placement = &place;
+       placement.num_busy_placement = 1;
+       placement.busy_placement = &place;
+
+       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_execbuf_release_pinned_bo(dev_priv);
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err_unlock;
+
+       /*
+        * Is this buffer already in vram but not at the start of it?
+        * In that case, evict it first because TTM isn't good at handling
+        * that situation.
+        */
+       if (bo->mem.mem_type == TTM_PL_VRAM &&
+           bo->mem.start < bo->num_pages &&
+           bo->mem.start > 0 &&
+           buf->pin_count == 0) {
+               ctx.interruptible = false;
+               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+       }
+
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, &ctx);
+
+       /* For some reason we didn't end up at the start of vram */
+       WARN_ON(ret == 0 && bo->offset != 0);
+       if (!ret)
+               vmw_bo_pin_reserved(buf, true);
+
+       ttm_bo_unreserve(bo);
+err_unlock:
+       ttm_write_unlock(&dev_priv->reservation_sem);
+
+       return ret;
+}
+
+/**
+ * vmw_bo_unpin - Unpin the given buffer without moving it.
+ *
+ * This function takes the reservation_sem in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  Buffer object to unpin.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_bo_unpin(struct vmw_private *dev_priv,
+                struct vmw_buffer_object *buf,
+                bool interruptible)
+{
+       struct ttm_buffer_object *bo = &buf->base;
+       int ret;
+
+       ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
+       if (unlikely(ret != 0))
+               return ret;
+
+       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
+       if (unlikely(ret != 0))
+               goto err;
+
+       vmw_bo_pin_reserved(buf, false);
+
+       ttm_bo_unreserve(bo);
+
+err:
+       ttm_read_unlock(&dev_priv->reservation_sem);
+       return ret;
+}
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+                         SVGAGuestPtr *ptr)
+{
+       if (bo->mem.mem_type == TTM_PL_VRAM) {
+               ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+               ptr->offset = bo->offset;
+       } else {
+               ptr->gmrId = bo->mem.start;
+               ptr->offset = 0;
+       }
+}
+
+
+/**
+ * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
+ *
+ * @vbo: The buffer object. Must be reserved.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+{
+       struct ttm_operation_ctx ctx = { false, true };
+       struct ttm_place pl;
+       struct ttm_placement placement;
+       struct ttm_buffer_object *bo = &vbo->base;
+       uint32_t old_mem_type = bo->mem.mem_type;
+       int ret;
+
+       lockdep_assert_held(&bo->resv->lock.base);
+
+       if (pin) {
+               if (vbo->pin_count++ > 0)
+                       return;
+       } else {
+               WARN_ON(vbo->pin_count <= 0);
+               if (--vbo->pin_count > 0)
+                       return;
+       }
+
+       pl.fpfn = 0;
+       pl.lpfn = 0;
+       pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
+               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
+       if (pin)
+               pl.flags |= TTM_PL_FLAG_NO_EVICT;
+
+       memset(&placement, 0, sizeof(placement));
+       placement.num_placement = 1;
+       placement.placement = &pl;
+
+       ret = ttm_bo_validate(bo, &placement, &ctx);
+
+       BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
+
+
+/**
+ * vmw_buffer_object_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_buffer_object_map_and_cache().
+ */
+void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo)
+{
+       if (vbo->map.bo == NULL)
+               return;
+
+       ttm_bo_kunmap(&vbo->map);
+}
+
+
+/**
+ * vmw_buffer_object_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo)
+{
+       struct ttm_buffer_object *bo = &vbo->base;
+       bool not_used;
+       void *virtual;
+       int ret;
+
+       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+       if (virtual)
+               return virtual;
+
+       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+       if (ret)
+               DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+       return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
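
A usage sketch for the cached-map helpers above (a hypothetical caller,
not part of this patch; it assumes the buffer object is reserved or
pinned, as the kernel-doc above requires):

	/* Hypothetical example: zero-fill a reserved buffer object. */
	static int vmw_bo_clear_example(struct vmw_buffer_object *vbo)
	{
		void *virtual = vmw_buffer_object_map_and_cache(vbo);

		if (!virtual)
			return -ENOMEM;

		memset(virtual, 0, vbo->base.num_pages << PAGE_SHIFT);

		/*
		 * No explicit vmw_buffer_object_unmap() is needed here; the
		 * cached map stays valid while the object is pinned or
		 * reserved and is torn down on move, swapout or destruction.
		 */
		return 0;
	}
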
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
deleted file mode 100644
index 21111fd..0000000
+++ /dev/null
@@ -1,887 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>
-
-static const struct ttm_place vram_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place vram_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
-
-static const struct ttm_place sys_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place sys_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
-
-static const struct ttm_place gmr_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place gmr_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
-
-static const struct ttm_place mob_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place mob_ne_placement_flags = {
-       .fpfn = 0,
-       .lpfn = 0,
-       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
-};
-
-struct ttm_placement vmw_vram_placement = {
-       .num_placement = 1,
-       .placement = &vram_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &vram_placement_flags
-};
-
-static const struct ttm_place vram_gmr_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
-       }
-};
-
-static const struct ttm_place gmr_vram_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
-       }
-};
-
-struct ttm_placement vmw_vram_gmr_placement = {
-       .num_placement = 2,
-       .placement = vram_gmr_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &gmr_placement_flags
-};
-
-static const struct ttm_place vram_gmr_ne_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
-                        TTM_PL_FLAG_NO_EVICT
-       }
-};
-
-struct ttm_placement vmw_vram_gmr_ne_placement = {
-       .num_placement = 2,
-       .placement = vram_gmr_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &gmr_ne_placement_flags
-};
-
-struct ttm_placement vmw_vram_sys_placement = {
-       .num_placement = 1,
-       .placement = &vram_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_placement_flags
-};
-
-struct ttm_placement vmw_vram_ne_placement = {
-       .num_placement = 1,
-       .placement = &vram_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &vram_ne_placement_flags
-};
-
-struct ttm_placement vmw_sys_placement = {
-       .num_placement = 1,
-       .placement = &sys_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_placement_flags
-};
-
-struct ttm_placement vmw_sys_ne_placement = {
-       .num_placement = 1,
-       .placement = &sys_ne_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_ne_placement_flags
-};
-
-static const struct ttm_place evictable_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
-       }
-};
-
-static const struct ttm_place nonfixed_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
-       }
-};
-
-struct ttm_placement vmw_evictable_placement = {
-       .num_placement = 4,
-       .placement = evictable_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_placement_flags
-};
-
-struct ttm_placement vmw_srf_placement = {
-       .num_placement = 1,
-       .num_busy_placement = 2,
-       .placement = &gmr_placement_flags,
-       .busy_placement = gmr_vram_placement_flags
-};
-
-struct ttm_placement vmw_mob_placement = {
-       .num_placement = 1,
-       .num_busy_placement = 1,
-       .placement = &mob_placement_flags,
-       .busy_placement = &mob_placement_flags
-};
-
-struct ttm_placement vmw_mob_ne_placement = {
-       .num_placement = 1,
-       .num_busy_placement = 1,
-       .placement = &mob_ne_placement_flags,
-       .busy_placement = &mob_ne_placement_flags
-};
-
-struct ttm_placement vmw_nonfixed_placement = {
-       .num_placement = 3,
-       .placement = nonfixed_placement_flags,
-       .num_busy_placement = 1,
-       .busy_placement = &sys_placement_flags
-};
-
-struct vmw_ttm_tt {
-       struct ttm_dma_tt dma_ttm;
-       struct vmw_private *dev_priv;
-       int gmr_id;
-       struct vmw_mob *mob;
-       int mem_type;
-       struct sg_table sgt;
-       struct vmw_sg_table vsgt;
-       uint64_t sg_alloc_size;
-       bool mapped;
-};
-
-const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
-
-/**
- * Helper functions to advance a struct vmw_piter iterator.
- *
- * @viter: Pointer to the iterator.
- *
- * These functions return false if past the end of the list,
- * true otherwise. Functions are selected depending on the current
- * DMA mapping mode.
- */
-static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
-{
-       return ++(viter->i) < viter->num_pages;
-}
-
-static bool __vmw_piter_sg_next(struct vmw_piter *viter)
-{
-       return __sg_page_iter_next(&viter->iter);
-}
-
-
-/**
- * Helper functions to return a pointer to the current page.
- *
- * @viter: Pointer to the iterator
- *
- * These functions return a pointer to the page currently
- * pointed to by @viter. Functions are selected depending on the
- * current mapping mode.
- */
-static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
-{
-       return viter->pages[viter->i];
-}
-
-static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
-{
-       return sg_page_iter_page(&viter->iter);
-}
-
-
-/**
- * Helper functions to return the DMA address of the current page.
- *
- * @viter: Pointer to the iterator
- *
- * These functions return the DMA address of the page currently
- * pointed to by @viter. Functions are selected depending on the
- * current mapping mode.
- */
-static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
-{
-       return page_to_phys(viter->pages[viter->i]);
-}
-
-static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
-{
-       return viter->addrs[viter->i];
-}
-
-static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
-{
-       return sg_page_iter_dma_address(&viter->iter);
-}
-
-
-/**
- * vmw_piter_start - Initialize a struct vmw_piter.
- *
- * @viter: Pointer to the iterator to initialize
- * @vsgt: Pointer to a struct vmw_sg_table to initialize from
- *
- * Note that we're following the convention of __sg_page_iter_start, so that
- * the iterator doesn't point to a valid page after initialization; it has
- * to be advanced one step first.
- */
-void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
-                    unsigned long p_offset)
-{
-       viter->i = p_offset - 1;
-       viter->num_pages = vsgt->num_pages;
-       switch (vsgt->mode) {
-       case vmw_dma_phys:
-               viter->next = &__vmw_piter_non_sg_next;
-               viter->dma_address = &__vmw_piter_phys_addr;
-               viter->page = &__vmw_piter_non_sg_page;
-               viter->pages = vsgt->pages;
-               break;
-       case vmw_dma_alloc_coherent:
-               viter->next = &__vmw_piter_non_sg_next;
-               viter->dma_address = &__vmw_piter_dma_addr;
-               viter->page = &__vmw_piter_non_sg_page;
-               viter->addrs = vsgt->addrs;
-               viter->pages = vsgt->pages;
-               break;
-       case vmw_dma_map_populate:
-       case vmw_dma_map_bind:
-               viter->next = &__vmw_piter_sg_next;
-               viter->dma_address = &__vmw_piter_sg_addr;
-               viter->page = &__vmw_piter_sg_page;
-               __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
-                                    vsgt->sgt->orig_nents, p_offset);
-               break;
-       default:
-               BUG();
-       }
-}
-
-/**
- * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
- * TTM pages
- *
- * @vmw_tt: Pointer to a struct vmw_ttm_backend
- *
- * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
- */
-static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
-{
-       struct device *dev = vmw_tt->dev_priv->dev->dev;
-
-       dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
-               DMA_BIDIRECTIONAL);
-       vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
-}
-
-/**
- * vmw_ttm_map_for_dma - map TTM pages to get device addresses
- *
- * @vmw_tt: Pointer to a struct vmw_ttm_backend
- *
- * This function is used to get device addresses from the kernel DMA layer.
- * However, it's violating the DMA API in that when this operation has been
- * performed, it's illegal for the CPU to write to the pages without first
- * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
- * therefore only legal to call this function if we know that the function
- * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
- * a CPU write buffer flush.
- */
-static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
-{
-       struct device *dev = vmw_tt->dev_priv->dev->dev;
-       int ret;
-
-       ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
-                        DMA_BIDIRECTIONAL);
-       if (unlikely(ret == 0))
-               return -ENOMEM;
-
-       vmw_tt->sgt.nents = ret;
-
-       return 0;
-}
-
-/**
- * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
- *
- * @vmw_tt: Pointer to a struct vmw_ttm_tt
- *
- * Select the correct function for and make sure the TTM pages are
- * visible to the device. Allocate storage for the device mappings.
- * If a mapping has already been performed, indicated by the storage
- * pointer being non NULL, the function returns success.
- */
-static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
-{
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-       struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-       struct ttm_operation_ctx ctx = {
-               .interruptible = true,
-               .no_wait_gpu = false
-       };
-       struct vmw_piter iter;
-       dma_addr_t old;
-       int ret = 0;
-       static size_t sgl_size;
-       static size_t sgt_size;
-
-       if (vmw_tt->mapped)
-               return 0;
-
-       vsgt->mode = dev_priv->map_mode;
-       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
-       vsgt->addrs = vmw_tt->dma_ttm.dma_address;
-       vsgt->sgt = &vmw_tt->sgt;
-
-       switch (dev_priv->map_mode) {
-       case vmw_dma_map_bind:
-       case vmw_dma_map_populate:
-               if (unlikely(!sgl_size)) {
-                       sgl_size = ttm_round_pot(sizeof(struct scatterlist));
-                       sgt_size = ttm_round_pot(sizeof(struct sg_table));
-               }
-               vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
-               ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
-                                               vsgt->num_pages, 0,
-                                               (unsigned long)
-                                               vsgt->num_pages << PAGE_SHIFT,
-                                               GFP_KERNEL);
-               if (unlikely(ret != 0))
-                       goto out_sg_alloc_fail;
-
-               if (vsgt->num_pages > vmw_tt->sgt.nents) {
-                       uint64_t over_alloc =
-                               sgl_size * (vsgt->num_pages -
-                                           vmw_tt->sgt.nents);
-
-                       ttm_mem_global_free(glob, over_alloc);
-                       vmw_tt->sg_alloc_size -= over_alloc;
-               }
-
-               ret = vmw_ttm_map_for_dma(vmw_tt);
-               if (unlikely(ret != 0))
-                       goto out_map_fail;
-
-               break;
-       default:
-               break;
-       }
-
-       old = ~((dma_addr_t) 0);
-       vmw_tt->vsgt.num_regions = 0;
-       for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
-               dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
-               if (cur != old + PAGE_SIZE)
-                       vmw_tt->vsgt.num_regions++;
-               old = cur;
-       }
-
-       vmw_tt->mapped = true;
-       return 0;
-
-out_map_fail:
-       sg_free_table(vmw_tt->vsgt.sgt);
-       vmw_tt->vsgt.sgt = NULL;
-out_sg_alloc_fail:
-       ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
-       return ret;
-}
-
-/**
- * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
- *
- * @vmw_tt: Pointer to a struct vmw_ttm_tt
- *
- * Tear down any previously set up device DMA mappings and free
- * any storage space allocated for them. If there are no mappings set up,
- * this function is a NOP.
- */
-static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
-{
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-
-       if (!vmw_tt->vsgt.sgt)
-               return;
-
-       switch (dev_priv->map_mode) {
-       case vmw_dma_map_bind:
-       case vmw_dma_map_populate:
-               vmw_ttm_unmap_from_dma(vmw_tt);
-               sg_free_table(vmw_tt->vsgt.sgt);
-               vmw_tt->vsgt.sgt = NULL;
-               ttm_mem_global_free(vmw_mem_glob(dev_priv),
-                                   vmw_tt->sg_alloc_size);
-               break;
-       default:
-               break;
-       }
-       vmw_tt->mapped = false;
-}
-
-
-/**
- * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_map_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- * Note that the buffer object must be either pinned or reserved before
- * calling this function.
- */
-int vmw_bo_map_dma(struct ttm_buffer_object *bo)
-{
-       struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-       return vmw_ttm_map_dma(vmw_tt);
-}
-
-
-/**
- * vmw_bo_unmap_dma - Tear down any device mappings of buffer object pages
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Wrapper around vmw_ttm_unmap_dma, that takes a TTM buffer object pointer
- * instead of a pointer to a struct vmw_ttm_backend as argument.
- */
-void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
-{
-       struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-       vmw_ttm_unmap_dma(vmw_tt);
-}
-
-
-/**
- * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
- * TTM buffer object
- *
- * @bo: Pointer to a struct ttm_buffer_object
- *
- * Returns a pointer to a struct vmw_sg_table object. The object should
- * not be freed after use.
- * Note that for the device addresses to be valid, the buffer object must
- * either be reserved or pinned.
- */
-const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
-{
-       struct vmw_ttm_tt *vmw_tt =
-               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-       return &vmw_tt->vsgt;
-}
-
-
-static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
-{
-       struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-       int ret;
-
-       ret = vmw_ttm_map_dma(vmw_be);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_be->gmr_id = bo_mem->start;
-       vmw_be->mem_type = bo_mem->mem_type;
-
-       switch (bo_mem->mem_type) {
-       case VMW_PL_GMR:
-               return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
-                                   ttm->num_pages, vmw_be->gmr_id);
-       case VMW_PL_MOB:
-               if (unlikely(vmw_be->mob == NULL)) {
-                       vmw_be->mob =
-                               vmw_mob_create(ttm->num_pages);
-                       if (unlikely(vmw_be->mob == NULL))
-                               return -ENOMEM;
-               }
-
-               return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
-                                   &vmw_be->vsgt, ttm->num_pages,
-                                   vmw_be->gmr_id);
-       default:
-               BUG();
-       }
-       return 0;
-}
-
-static int vmw_ttm_unbind(struct ttm_tt *ttm)
-{
-       struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-       switch (vmw_be->mem_type) {
-       case VMW_PL_GMR:
-               vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
-               break;
-       case VMW_PL_MOB:
-               vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
-               break;
-       default:
-               BUG();
-       }
-
-       if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
-               vmw_ttm_unmap_dma(vmw_be);
-
-       return 0;
-}
-
-
-static void vmw_ttm_destroy(struct ttm_tt *ttm)
-{
-       struct vmw_ttm_tt *vmw_be =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-
-       vmw_ttm_unmap_dma(vmw_be);
-       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ttm_dma_tt_fini(&vmw_be->dma_ttm);
-       else
-               ttm_tt_fini(ttm);
-
-       if (vmw_be->mob)
-               vmw_mob_destroy(vmw_be->mob);
-
-       kfree(vmw_be);
-}
-
-
-static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
-{
-       struct vmw_ttm_tt *vmw_tt =
-               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-       int ret;
-
-       if (ttm->state != tt_unpopulated)
-               return 0;
-
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-               size_t size =
-                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-               ret = ttm_mem_global_alloc(glob, size, ctx);
-               if (unlikely(ret != 0))
-                       return ret;
-
-               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
-                                       ctx);
-               if (unlikely(ret != 0))
-                       ttm_mem_global_free(glob, size);
-       } else
-               ret = ttm_pool_populate(ttm, ctx);
-
-       return ret;
-}
-
-static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
-{
-       struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-                                                dma_ttm.ttm);
-       struct vmw_private *dev_priv = vmw_tt->dev_priv;
-       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-
-
-       if (vmw_tt->mob) {
-               vmw_mob_destroy(vmw_tt->mob);
-               vmw_tt->mob = NULL;
-       }
-
-       vmw_ttm_unmap_dma(vmw_tt);
-       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-               size_t size =
-                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-
-               ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
-               ttm_mem_global_free(glob, size);
-       } else
-               ttm_pool_unpopulate(ttm);
-}
-
-static struct ttm_backend_func vmw_ttm_func = {
-       .bind = vmw_ttm_bind,
-       .unbind = vmw_ttm_unbind,
-       .destroy = vmw_ttm_destroy,
-};
-
-static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
-                                       uint32_t page_flags)
-{
-       struct vmw_ttm_tt *vmw_be;
-       int ret;
-
-       vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
-       if (!vmw_be)
-               return NULL;
-
-       vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
-       vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
-       vmw_be->mob = NULL;
-
-       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
-       else
-               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
-       if (unlikely(ret != 0))
-               goto out_no_init;
-
-       return &vmw_be->dma_ttm.ttm;
-out_no_init:
-       kfree(vmw_be);
-       return NULL;
-}
-
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
-       return 0;
-}
-
-static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-                     struct ttm_mem_type_manager *man)
-{
-       switch (type) {
-       case TTM_PL_SYSTEM:
-               /* System memory */
-
-               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_FLAG_CACHED;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               break;
-       case TTM_PL_VRAM:
-               /* "On-card" video ram */
-               man->func = &ttm_bo_manager_func;
-               man->gpu_offset = 0;
-               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_FLAG_CACHED;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               break;
-       case VMW_PL_GMR:
-       case VMW_PL_MOB:
-               /*
-                * "Guest Memory Regions" is an aperture like feature with
-                *  one slot per bo. There is an upper limit of the number of
-                *  slots as well as the bo size.
-                */
-               man->func = &vmw_gmrid_manager_func;
-               man->gpu_offset = 0;
-               man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
-               man->available_caching = TTM_PL_FLAG_CACHED;
-               man->default_caching = TTM_PL_FLAG_CACHED;
-               break;
-       default:
-               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void vmw_evict_flags(struct ttm_buffer_object *bo,
-                    struct ttm_placement *placement)
-{
-       *placement = vmw_sys_placement;
-}
-
-static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-       struct ttm_object_file *tfile =
-               vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
-
-       return vmw_user_dmabuf_verify_access(bo, tfile);
-}
-
-static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-       struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
-
-       mem->bus.addr = NULL;
-       mem->bus.is_iomem = false;
-       mem->bus.offset = 0;
-       mem->bus.size = mem->num_pages << PAGE_SHIFT;
-       mem->bus.base = 0;
-       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-               return -EINVAL;
-       switch (mem->mem_type) {
-       case TTM_PL_SYSTEM:
-       case VMW_PL_GMR:
-       case VMW_PL_MOB:
-               return 0;
-       case TTM_PL_VRAM:
-               mem->bus.offset = mem->start << PAGE_SHIFT;
-               mem->bus.base = dev_priv->vram_start;
-               mem->bus.is_iomem = true;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
-{
-}
-
-static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
-{
-       return 0;
-}
-
-/**
- * vmw_move_notify - TTM move_notify_callback
- *
- * @bo: The TTM buffer object about to move.
- * @mem: The struct ttm_mem_reg indicating to what memory
- *       region the move is taking place.
- *
- * Calls move_notify for all subsystems needing it.
- * (currently only resources).
- */
-static void vmw_move_notify(struct ttm_buffer_object *bo,
-                           bool evict,
-                           struct ttm_mem_reg *mem)
-{
-       vmw_resource_move_notify(bo, mem);
-       vmw_query_move_notify(bo, mem);
-}
-
-
-/**
- * vmw_swap_notify - TTM swap_notify callback
- *
- * @bo: The TTM buffer object about to be swapped out.
- */
-static void vmw_swap_notify(struct ttm_buffer_object *bo)
-{
-       vmw_resource_swap_notify(bo);
-       (void) ttm_bo_wait(bo, false, false);
-}
-
-
-struct ttm_bo_driver vmw_bo_driver = {
-       .ttm_tt_create = &vmw_ttm_tt_create,
-       .ttm_tt_populate = &vmw_ttm_populate,
-       .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
-       .invalidate_caches = vmw_invalidate_caches,
-       .init_mem_type = vmw_init_mem_type,
-       .eviction_valuable = ttm_bo_eviction_valuable,
-       .evict_flags = vmw_evict_flags,
-       .move = NULL,
-       .verify_access = vmw_verify_access,
-       .move_notify = vmw_move_notify,
-       .swap_notify = vmw_swap_notify,
-       .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
-       .io_mem_reserve = &vmw_ttm_io_mem_reserve,
-       .io_mem_free = &vmw_ttm_io_mem_free,
-};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 3767ac3..ff8acc7 100644
@@ -38,7 +38,7 @@ struct vmw_user_context {
        struct vmw_cmdbuf_res_manager *man;
        struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
        spinlock_t cotable_lock;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
 };
 
 static void vmw_user_context_free(struct vmw_resource *res);
@@ -900,7 +900,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
  * specified in the parameter.  0 otherwise.
  */
 int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-                             struct vmw_dma_buffer *mob)
+                             struct vmw_buffer_object *mob)
 {
        struct vmw_user_context *uctx =
                container_of(ctx_res, struct vmw_user_context, res);
@@ -908,7 +908,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
        if (mob == NULL) {
                if (uctx->dx_query_mob) {
                        uctx->dx_query_mob->dx_query_ctx = NULL;
-                       vmw_dmabuf_unreference(&uctx->dx_query_mob);
+                       vmw_bo_unreference(&uctx->dx_query_mob);
                        uctx->dx_query_mob = NULL;
                }
 
@@ -922,7 +922,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
        mob->dx_query_ctx  = ctx_res;
 
        if (!uctx->dx_query_mob)
-               uctx->dx_query_mob = vmw_dmabuf_reference(mob);
+               uctx->dx_query_mob = vmw_bo_reference(mob);
 
        return 0;
 }
@@ -932,7 +932,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
  *
  * @ctx_res: The context resource
  */
-struct vmw_dma_buffer *
+struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
 {
        struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index cbf54ea..1052cd3 100644
@@ -390,7 +390,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        struct ttm_operation_ctx ctx = { false, false };
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_cotable *vcotbl = vmw_cotable(res);
-       struct vmw_dma_buffer *buf, *old_buf = res->backup;
+       struct vmw_buffer_object *buf, *old_buf = res->backup;
        struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
        size_t old_size = res->backup_size;
        size_t old_size_read_back = vcotbl->size_read_back;
@@ -415,8 +415,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        if (!buf)
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
-                             true, vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
+                         true, vmw_bo_bo_free);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                return ret;
@@ -482,7 +482,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
        /* Let go of the old mob. */
        list_del(&res->mob_head);
        list_add_tail(&res->mob_head, &buf->res_list);
-       vmw_dmabuf_unreference(&old_buf);
+       vmw_bo_unreference(&old_buf);
        res->id = vcotbl->type;
 
        return 0;
@@ -491,7 +491,7 @@ out_map_new:
        ttm_bo_kunmap(&old_map);
 out_wait:
        ttm_bo_unreserve(bo);
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
deleted file mode 100644
index d59d9dd..0000000
+++ /dev/null
@@ -1,376 +0,0 @@
-/**************************************************************************
- *
- * Copyright © 2011-2015 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
-
-#include <drm/drmP.h>
-#include "vmwgfx_drv.h"
-
-
-/**
- * vmw_dmabuf_pin_in_placement - Validate a buffer to placement.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @placement:  The placement to pin it.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- *  -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
-                               struct vmw_dma_buffer *buf,
-                               struct ttm_placement *placement,
-                               bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-       uint32_t new_flags;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       if (buf->pin_count > 0)
-               ret = ttm_bo_mem_compat(placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-       else
-               ret = ttm_bo_validate(bo, placement, &ctx);
-
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-
-err:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
-                                 bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-       uint32_t new_flags;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       if (buf->pin_count > 0) {
-               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-               goto out_unreserve;
-       }
-
-       ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
-       if (likely(ret == 0) || ret == -ERESTARTSYS)
-               goto out_unreserve;
-
-       ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
-
-out_unreserve:
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-err:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_dmabuf_pin_in_vram - Move a buffer to vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to move.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-                          struct vmw_dma_buffer *buf,
-                          bool interruptible)
-{
-       return vmw_dmabuf_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
-                                          interruptible);
-}
-
-/**
- * vmw_dmabuf_pin_in_start_of_vram - Move a buffer to start of vram.
- *
- * This function takes the reservation_sem in write mode.
- * Flushes and unpins the query bo to avoid failures.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to pin.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
-                                   struct vmw_dma_buffer *buf,
-                                   bool interruptible)
-{
-       struct ttm_operation_ctx ctx = {interruptible, false };
-       struct ttm_buffer_object *bo = &buf->base;
-       struct ttm_placement placement;
-       struct ttm_place place;
-       int ret = 0;
-       uint32_t new_flags;
-
-       place = vmw_vram_placement.placement[0];
-       place.lpfn = bo->num_pages;
-       placement.num_placement = 1;
-       placement.placement = &place;
-       placement.num_busy_placement = 1;
-       placement.busy_placement = &place;
-
-       ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       vmw_execbuf_release_pinned_bo(dev_priv);
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err_unlock;
-
-       /*
-        * Is this buffer already in vram but not at the start of it?
-        * In that case, evict it first because TTM isn't good at handling
-        * that situation.
-        */
-       if (bo->mem.mem_type == TTM_PL_VRAM &&
-           bo->mem.start < bo->num_pages &&
-           bo->mem.start > 0 &&
-           buf->pin_count == 0) {
-               ctx.interruptible = false;
-               (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
-       }
-
-       if (buf->pin_count > 0)
-               ret = ttm_bo_mem_compat(&placement, &bo->mem,
-                                       &new_flags) == true ? 0 : -EINVAL;
-       else
-               ret = ttm_bo_validate(bo, &placement, &ctx);
-
-       /* For some reason we didn't end up at the start of vram */
-       WARN_ON(ret == 0 && bo->offset != 0);
-       if (!ret)
-               vmw_bo_pin_reserved(buf, true);
-
-       ttm_bo_unreserve(bo);
-err_unlock:
-       ttm_write_unlock(&dev_priv->reservation_sem);
-
-       return ret;
-}
-
-/**
- * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
- *
- * This function takes the reservation_sem in write mode.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to unpin.
- * @interruptible:  Use interruptible wait.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
-                    struct vmw_dma_buffer *buf,
-                    bool interruptible)
-{
-       struct ttm_buffer_object *bo = &buf->base;
-       int ret;
-
-       ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
-       if (unlikely(ret != 0))
-               return ret;
-
-       ret = ttm_bo_reserve(bo, interruptible, false, NULL);
-       if (unlikely(ret != 0))
-               goto err;
-
-       vmw_bo_pin_reserved(buf, false);
-
-       ttm_bo_unreserve(bo);
-
-err:
-       ttm_read_unlock(&dev_priv->reservation_sem);
-       return ret;
-}
-
-/**
- * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
- * of a buffer.
- *
- * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
- * @ptr: SVGAGuestPtr returning the result.
- */
-void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
-                         SVGAGuestPtr *ptr)
-{
-       if (bo->mem.mem_type == TTM_PL_VRAM) {
-               ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
-               ptr->offset = bo->offset;
-       } else {
-               ptr->gmrId = bo->mem.start;
-               ptr->offset = 0;
-       }
-}
-
-
-/**
- * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
- *
- * @vbo: The buffer object. Must be reserved.
- * @pin: Whether to pin or unpin.
- *
- */
-void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
-{
-       struct ttm_operation_ctx ctx = { false, true };
-       struct ttm_place pl;
-       struct ttm_placement placement;
-       struct ttm_buffer_object *bo = &vbo->base;
-       uint32_t old_mem_type = bo->mem.mem_type;
-       int ret;
-
-       lockdep_assert_held(&bo->resv->lock.base);
-
-       if (pin) {
-               if (vbo->pin_count++ > 0)
-                       return;
-       } else {
-               WARN_ON(vbo->pin_count <= 0);
-               if (--vbo->pin_count > 0)
-                       return;
-       }
-
-       pl.fpfn = 0;
-       pl.lpfn = 0;
-       pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
-               | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
-       if (pin)
-               pl.flags |= TTM_PL_FLAG_NO_EVICT;
-
-       memset(&placement, 0, sizeof(placement));
-       placement.num_placement = 1;
-       placement.placement = &pl;
-
-       ret = ttm_bo_validate(bo, &placement, &ctx);
-
-       BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
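
As context for the pin-count handling above, a minimal reserve/pin/unreserve
sketch (hypothetical wrapper, post-rename names; pin counting is handled
inside vmw_bo_pin_reserved() itself):

        /* Hypothetical sketch: pin a buffer in whatever placement it has. */
        static int example_pin_in_place(struct vmw_buffer_object *vbo)
        {
                int ret = ttm_bo_reserve(&vbo->base, false, false, NULL);

                if (ret)
                        return ret;
                vmw_bo_pin_reserved(vbo, true); /* pin_count 0 -> 1 */
                ttm_bo_unreserve(&vbo->base);
                return 0;
        }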
-
-
-/*
- * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
- *
- * @vbo: The buffer object whose map we are tearing down.
- *
- * This function tears down a cached map set up using
- * vmw_dma_buffer_map_and_cache().
- */
-void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
-{
-       if (vbo->map.bo == NULL)
-               return;
-
-       ttm_bo_kunmap(&vbo->map);
-}
-
-
-/*
- * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
- *
- * @vbo: The buffer object to map
- * Return: A kernel virtual address or NULL if mapping failed.
- *
- * This function maps a buffer object into the kernel address space, or
- * returns the virtual kernel address of an already existing map. The virtual
- * address remains valid as long as the buffer object is pinned or reserved.
- * The cached map is torn down on either
- * 1) Buffer object move
- * 2) Buffer object swapout
- * 3) Buffer object destruction
- *
- */
-void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
-{
-       struct ttm_buffer_object *bo = &vbo->base;
-       bool not_used;
-       void *virtual;
-       int ret;
-
-       virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
-       if (virtual)
-               return virtual;
-
-       ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
-       if (ret)
-               DRM_ERROR("Buffer object map failed: %d.\n", ret);
-
-       return ttm_kmap_obj_virtual(&vbo->map, &not_used);
-}
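
A usage sketch for the cached map, using the name this patch introduces
(vmw_buffer_object_map_and_cache(); the wrapper is hypothetical). Note the
caller intentionally leaves the map in place; the move, swapout and destroy
paths tear it down as documented above:

        /* Hypothetical sketch: CPU-write one dword at the buffer start. */
        static int example_poke_buffer(struct vmw_buffer_object *vbo, u32 val)
        {
                u32 *virtual;
                int ret = ttm_bo_reserve(&vbo->base, false, false, NULL);

                if (ret)
                        return ret;
                virtual = vmw_buffer_object_map_and_cache(vbo);
                if (virtual)
                        virtual[0] = val;
                /* No unmap here: the map stays cached with the buffer. */
                ttm_bo_unreserve(&vbo->base);
                return virtual ? 0 : -ENOMEM;
        }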
index 09cc721..4f18304 100644 (file)
 static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
+       VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_bo_alloc_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
-       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
+       VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_bo_unref_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
@@ -219,7 +219,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
-                     vmw_user_dmabuf_synccpu_ioctl,
+                     vmw_user_bo_synccpu_ioctl,
                      DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
                      vmw_extended_context_define_ioctl,
@@ -321,7 +321,7 @@ static void vmw_print_capabilities(uint32_t capabilities)
 static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 {
        int ret;
-       struct vmw_dma_buffer *vbo;
+       struct vmw_buffer_object *vbo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
@@ -335,9 +335,9 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
        if (!vbo)
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
-                             &vmw_sys_ne_placement, false,
-                             &vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
+                         &vmw_sys_ne_placement, false,
+                         &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                return ret;
 
@@ -358,7 +358,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
 
        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
-               vmw_dmabuf_unreference(&vbo);
+               vmw_bo_unreference(&vbo);
        } else
                dev_priv->dummy_query_bo = vbo;
 
@@ -460,7 +460,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
 
        BUG_ON(dev_priv->pinned_bo != NULL);
 
-       vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+       vmw_bo_unreference(&dev_priv->dummy_query_bo);
        if (dev_priv->cman)
                vmw_cmdbuf_remove_pool(dev_priv->cman);
 
index 5fcbe16..25c2f66 100644 (file)
@@ -86,7 +86,7 @@ struct vmw_fpriv {
        bool gb_aware;
 };
 
-struct vmw_dma_buffer {
+struct vmw_buffer_object {
        struct ttm_buffer_object base;
        struct list_head res_list;
        s32 pin_count;
@@ -120,7 +120,7 @@ struct vmw_resource {
        unsigned long backup_size;
        bool res_dirty; /* Protected by backup buffer reserved */
        bool backup_dirty; /* Protected by backup buffer reserved */
-       struct vmw_dma_buffer *backup;
+       struct vmw_buffer_object *backup;
        unsigned long backup_offset;
        unsigned long pin_count; /* Protected by resource reserved */
        const struct vmw_res_func *func;
@@ -304,7 +304,7 @@ struct vmw_sw_context{
        uint32_t cmd_bounce_size;
        struct list_head resource_list;
        struct list_head ctx_resource_list; /* For contexts and cotables */
-       struct vmw_dma_buffer *cur_query_bo;
+       struct vmw_buffer_object *cur_query_bo;
        struct list_head res_relocations;
        uint32_t *buf_start;
        struct vmw_res_cache_entry res_cache[vmw_res_max];
@@ -315,7 +315,7 @@ struct vmw_sw_context{
        bool staged_bindings_inuse;
        struct list_head staged_cmd_res;
        struct vmw_resource_val_node *dx_ctx_node;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct vmw_resource *dx_query_ctx;
        struct vmw_cmdbuf_res_manager *man;
 };
@@ -513,8 +513,8 @@ struct vmw_private {
         * are protected by the cmdbuf mutex.
         */
 
-       struct vmw_dma_buffer *dummy_query_bo;
-       struct vmw_dma_buffer *pinned_bo;
+       struct vmw_buffer_object *dummy_query_bo;
+       struct vmw_buffer_object *pinned_bo;
        uint32_t query_cid;
        uint32_t query_cid_valid;
        bool dummy_query_bo_pinned;
@@ -623,43 +623,43 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                                  struct ttm_object_file *tfile,
                                  uint32_t handle,
                                  struct vmw_surface **out_surf,
-                                 struct vmw_dma_buffer **out_buf);
+                                 struct vmw_buffer_object **out_buf);
 extern int vmw_user_resource_lookup_handle(
        struct vmw_private *dev_priv,
        struct ttm_object_file *tfile,
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                          struct vmw_dma_buffer *vmw_bo,
-                          size_t size, struct ttm_placement *placement,
-                          bool interuptable,
-                          void (*bo_free) (struct ttm_buffer_object *bo));
-extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
-                                 struct ttm_object_file *tfile);
-extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-                                struct ttm_object_file *tfile,
-                                uint32_t size,
-                                bool shareable,
-                                uint32_t *handle,
-                                struct vmw_dma_buffer **p_dma_buf,
-                                struct ttm_base_object **p_base);
-extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                                    struct vmw_dma_buffer *dma_buf,
-                                    uint32_t *handle);
-extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-                                 struct drm_file *file_priv);
-extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
-                                        struct drm_file *file_priv);
-extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
-                                        uint32_t cur_validate_node);
-extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
-extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                                 uint32_t id, struct vmw_dma_buffer **out,
-                                 struct ttm_base_object **base);
+extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_init(struct vmw_private *dev_priv,
+                      struct vmw_buffer_object *vmw_bo,
+                      size_t size, struct ttm_placement *placement,
+                      bool interuptable,
+                      void (*bo_free)(struct ttm_buffer_object *bo));
+extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
+                                    struct ttm_object_file *tfile);
+extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                            struct ttm_object_file *tfile,
+                            uint32_t size,
+                            bool shareable,
+                            uint32_t *handle,
+                            struct vmw_buffer_object **p_dma_buf,
+                            struct ttm_base_object **p_base);
+extern int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                                struct vmw_buffer_object *dma_buf,
+                                uint32_t *handle);
+extern int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+                                    struct drm_file *file_priv);
+extern uint32_t vmw_bo_validate_node(struct ttm_buffer_object *bo,
+                                    uint32_t cur_validate_node);
+extern void vmw_bo_validate_clear(struct ttm_buffer_object *bo);
+extern int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                             uint32_t id, struct vmw_buffer_object **out,
+                             struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -670,43 +670,43 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                                  struct vmw_resource **out);
 extern void vmw_resource_unreserve(struct vmw_resource *res,
                                   bool switch_backup,
-                                  struct vmw_dma_buffer *new_backup,
+                                  struct vmw_buffer_object *new_backup,
                                   unsigned long new_backup_offset);
 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                                     struct ttm_mem_reg *mem);
 extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem);
 extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
-extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
+extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
 extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                                struct vmw_fence_obj *fence);
 extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 
 /**
- * DMA buffer helper routines - vmwgfx_dmabuf.c
+ * Buffer object helper functions - vmwgfx_bo.c
  */
-extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
-                                      struct vmw_dma_buffer *bo,
-                                      struct ttm_placement *placement,
+extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
+                                  struct vmw_buffer_object *bo,
+                                  struct ttm_placement *placement,
+                                  bool interruptible);
+extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+                             struct vmw_buffer_object *buf,
+                             bool interruptible);
+extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+                                    struct vmw_buffer_object *buf,
+                                    bool interruptible);
+extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+                                      struct vmw_buffer_object *bo,
                                       bool interruptible);
-extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
-                                 bool interruptible);
-extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
-                                        struct vmw_dma_buffer *buf,
-                                        bool interruptible);
-extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
-                                          struct vmw_dma_buffer *bo,
-                                          bool interruptible);
-extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
-                           struct vmw_dma_buffer *bo,
-                           bool interruptible);
+extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
+                       struct vmw_buffer_object *bo,
+                       bool interruptible);
 extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
                                 SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
-extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
-extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
+extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
+extern void *vmw_buffer_object_map_and_cache(struct vmw_buffer_object *vbo);
+extern void vmw_buffer_object_unmap(struct vmw_buffer_object *vbo);
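
A sketch of how these pin helpers pair up at a call site (hypothetical
wrapper; per the function documentation the helpers take the reservation
semaphore themselves, so the caller must not hold it):

        /* Hypothetical sketch: pin for display access, unpin when done. */
        static int example_scanout(struct vmw_private *dev_priv,
                                   struct vmw_buffer_object *buf)
        {
                int ret = vmw_bo_pin_in_vram(dev_priv, buf, true);

                if (ret)
                        return ret;
                /* ... point the display at the buffer ... */
                return vmw_bo_unpin(dev_priv, buf, false);
        }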
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -758,7 +758,7 @@ extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
 extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /**
- * TTM buffer object driver - vmwgfx_buffer.c
+ * TTM buffer object driver - vmwgfx_ttm_buffer.c
  */
 
 extern const size_t vmw_tt_size;
@@ -1041,8 +1041,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
 extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
                                          bool readback);
 extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
-                                    struct vmw_dma_buffer *mob);
-extern struct vmw_dma_buffer *
+                                    struct vmw_buffer_object *mob);
+extern struct vmw_buffer_object *
 vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
 
 
@@ -1243,9 +1243,9 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
        return srf;
 }
 
-static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
+static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 {
-       struct vmw_dma_buffer *tmp_buf = *buf;
+       struct vmw_buffer_object *tmp_buf = *buf;
 
        *buf = NULL;
        if (tmp_buf != NULL) {
@@ -1255,7 +1255,8 @@ static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
        }
 }
 
-static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
+static inline struct vmw_buffer_object *
+vmw_bo_reference(struct vmw_buffer_object *buf)
 {
        if (ttm_bo_reference(&buf->base))
                return buf;
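
The unreference helper takes a double pointer so it can clear the caller's
reference, which makes dangling-pointer bugs easier to catch. A short sketch
(the function is hypothetical):

        /* Hypothetical sketch: hold an extra reference across some work. */
        static void example_take_and_drop(struct vmw_buffer_object *buf)
        {
                struct vmw_buffer_object *hold = vmw_bo_reference(buf);

                /* ... use hold while the reference is held ... */
                vmw_bo_unreference(&hold);      /* drops the ref, hold = NULL */
        }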
index c9d5cc2..a8b1946 100644 (file)
@@ -92,7 +92,7 @@ struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
-       struct vmw_dma_buffer *new_backup;
+       struct vmw_buffer_object *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        u32 first_usage : 1;
@@ -126,9 +126,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p);
+                                struct vmw_buffer_object **vmw_bo_p);
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_dma_buffer *vbo,
+                                  struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node);
 /**
@@ -185,7 +185,7 @@ static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
                }
                vmw_resource_unreserve(res, switch_backup, val->new_backup,
                                       val->new_backup_offset);
-               vmw_dmabuf_unreference(&val->new_backup);
+               vmw_bo_unreference(&val->new_backup);
        }
 }
 
@@ -423,7 +423,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
        }
 
        if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
-               struct vmw_dma_buffer *dx_query_mob;
+               struct vmw_buffer_object *dx_query_mob;
 
                dx_query_mob = vmw_context_get_dx_query_mob(ctx);
                if (dx_query_mob)
@@ -544,7 +544,7 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
  * submission is reached.
  */
 static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
-                                  struct vmw_dma_buffer *vbo,
+                                  struct vmw_buffer_object *vbo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
 {
@@ -616,7 +616,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
                        return ret;
 
                if (res->backup) {
-                       struct vmw_dma_buffer *vbo = res->backup;
+                       struct vmw_buffer_object *vbo = res->backup;
 
                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
@@ -628,7 +628,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
        }
 
        if (sw_context->dx_query_mob) {
-               struct vmw_dma_buffer *expected_dx_query_mob;
+               struct vmw_buffer_object *expected_dx_query_mob;
 
                expected_dx_query_mob =
                        vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -657,7 +657,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;
-               struct vmw_dma_buffer *backup = res->backup;
+               struct vmw_buffer_object *backup = res->backup;
 
                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
@@ -668,7 +668,7 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 
                /* Check if the resource switched backup buffer */
                if (backup && res->backup && (backup != res->backup)) {
-                       struct vmw_dma_buffer *vbo = res->backup;
+                       struct vmw_buffer_object *vbo = res->backup;
 
                        ret = vmw_bo_to_validate_list
                                (sw_context, vbo,
@@ -821,7 +821,7 @@ out_no_reloc:
 static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
        struct vmw_private *dev_priv = ctx_res->dev_priv;
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXBindAllQuery body;
@@ -1152,7 +1152,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
  * command batch.
  */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
-                                      struct vmw_dma_buffer *new_query_bo,
+                                      struct vmw_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
 {
        struct vmw_res_cache_entry *ctx_entry =
@@ -1234,7 +1234,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
-                       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+                       vmw_bo_unreference(&dev_priv->pinned_bo);
                }
 
                if (!sw_context->needs_post_query_barrier) {
@@ -1256,7 +1256,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
-                               vmw_dmabuf_reference(sw_context->cur_query_bo);
+                               vmw_bo_reference(sw_context->cur_query_bo);
                }
        }
 }
@@ -1282,15 +1282,14 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
-                                struct vmw_dma_buffer **vmw_bo_p)
+                                struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-                                    NULL);
+       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                ret = -EINVAL;
@@ -1316,7 +1315,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        return 0;
 
 out_no_reloc:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
 }
@@ -1343,15 +1342,14 @@ out_no_reloc:
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
-                                  struct vmw_dma_buffer **vmw_bo_p)
+                                  struct vmw_buffer_object **vmw_bo_p)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
-                                    NULL);
+       ret = vmw_user_bo_lookup(sw_context->fp->tfile, handle, &vmw_bo, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                ret = -EINVAL;
@@ -1376,7 +1374,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        return 0;
 
 out_no_reloc:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
 }
@@ -1447,7 +1445,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
                SVGA3dCmdDXBindQuery q;
        } *cmd;
 
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int    ret;
 
 
@@ -1466,7 +1464,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
        sw_context->dx_query_mob = vmw_bo;
        sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
 
        return ret;
 }
@@ -1549,7 +1547,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
@@ -1569,7 +1567,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1584,7 +1582,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
@@ -1623,7 +1621,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 
        ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1638,7 +1636,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
@@ -1656,7 +1654,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1671,7 +1669,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
@@ -1708,7 +1706,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return 0;
 }
 
@@ -1716,7 +1714,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
 {
-       struct vmw_dma_buffer *vmw_bo = NULL;
+       struct vmw_buffer_object *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
@@ -1768,7 +1766,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                             header);
 
 out_no_surface:
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
        return ret;
 }
 
@@ -1887,7 +1885,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int ret;
 
        struct {
@@ -1901,7 +1899,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       vmw_dmabuf_unreference(&vmw_bo);
+       vmw_bo_unreference(&vmw_bo);
 
        return ret;
 }
@@ -1928,7 +1926,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
                                     uint32_t *buf_id,
                                     unsigned long backup_offset)
 {
-       struct vmw_dma_buffer *dma_buf;
+       struct vmw_buffer_object *dma_buf;
        int ret;
 
        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
@@ -1939,7 +1937,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
        if (val_node->first_usage)
                val_node->no_buffer_needed = true;
 
-       vmw_dmabuf_unreference(&val_node->new_backup);
+       vmw_bo_unreference(&val_node->new_backup);
        val_node->new_backup = dma_buf;
        val_node->new_backup_offset = backup_offset;
 
@@ -3701,8 +3699,8 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                               bool interruptible,
                               bool validate_as_mob)
 {
-       struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
-                                                 base);
+       struct vmw_buffer_object *vbo =
+               container_of(bo, struct vmw_buffer_object, base);
        struct ttm_operation_ctx ctx = { interruptible, true };
        int ret;
 
@@ -4423,7 +4421,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
-       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+       vmw_bo_unreference(&dev_priv->pinned_bo);
 out_unlock:
        return;
 
@@ -4432,7 +4430,7 @@ out_no_emit:
 out_no_reserve:
        ttm_bo_unref(&query_val.bo);
        ttm_bo_unref(&pinned_val.bo);
-       vmw_dmabuf_unreference(&dev_priv->pinned_bo);
+       vmw_bo_unreference(&dev_priv->pinned_bo);
 }
 
 /**
index 9b7e0ac..dcde498 100644 (file)
@@ -42,7 +42,7 @@ struct vmw_fb_par {
        void *vmalloc;
 
        struct mutex bo_mutex;
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        unsigned bo_size;
        struct drm_framebuffer *set_fb;
        struct drm_display_mode *set_mode;
@@ -184,7 +184,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
        struct drm_clip_rect clip;
        struct drm_framebuffer *cur_fb;
        u8 *src_ptr, *dst_ptr;
-       struct vmw_dma_buffer *vbo = par->vmw_bo;
+       struct vmw_buffer_object *vbo = par->vmw_bo;
        void *virtual;
 
        if (!READ_ONCE(par->dirty.active))
@@ -197,7 +197,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
 
        (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
        (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
-       virtual = vmw_dma_buffer_map_and_cache(vbo);
+       virtual = vmw_buffer_object_map_and_cache(vbo);
        if (!virtual)
                goto out_unreserve;
 
@@ -391,9 +391,9 @@ static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
  */
 
 static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
-                           size_t size, struct vmw_dma_buffer **out)
+                           size_t size, struct vmw_buffer_object **out)
 {
-       struct vmw_dma_buffer *vmw_bo;
+       struct vmw_buffer_object *vmw_bo;
        int ret;
 
        (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
@@ -404,10 +404,10 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
                goto err_unlock;
        }
 
-       ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
+       ret = vmw_bo_init(vmw_priv, vmw_bo, size,
                              &vmw_sys_placement,
                              false,
-                             &vmw_dmabuf_bo_free);
+                             &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto err_unlock; /* init frees the buffer on failure */
 
@@ -491,7 +491,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
        }
 
        if (par->vmw_bo && detach_bo && unref_bo)
-               vmw_dmabuf_unreference(&par->vmw_bo);
+               vmw_bo_unreference(&par->vmw_bo);
 
        return 0;
 }
index c5e8eae..5e0c8f7 100644 (file)
@@ -377,8 +377,8 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
        }
 
        vfb = vmw_framebuffer_to_vfb(fb);
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
+       if (!vfb->bo) {
+               DRM_ERROR("Framebuffer not buffer backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }
index ef96ba7..7a32be0 100644 (file)
@@ -85,10 +85,10 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
-static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
-                                   struct vmw_dma_buffer *dmabuf,
-                                   u32 width, u32 height,
-                                   u32 hotspotX, u32 hotspotY)
+static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
+                               struct vmw_buffer_object *bo,
+                               u32 width, u32 height,
+                               u32 hotspotX, u32 hotspotY)
 {
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
@@ -100,13 +100,13 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
        kmap_offset = 0;
        kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       ret = ttm_bo_reserve(&dmabuf->base, true, false, NULL);
+       ret = ttm_bo_reserve(&bo->base, true, false, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return -EINVAL;
        }
 
-       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;
 
@@ -116,7 +116,7 @@ static int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
 
        ttm_bo_kunmap(&map);
 err_unreserve:
-       ttm_bo_unreserve(&dmabuf->base);
+       ttm_bo_unreserve(&bo->base);
 
        return ret;
 }
@@ -352,13 +352,13 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unreference(&vps->dmabuf);
+       if (vps->bo)
+               vmw_bo_unreference(&vps->bo);
 
        if (fb) {
-               if (vmw_framebuffer_to_vfb(fb)->dmabuf) {
-                       vps->dmabuf = vmw_framebuffer_to_vfbd(fb)->buffer;
-                       vmw_dmabuf_reference(vps->dmabuf);
+               if (vmw_framebuffer_to_vfb(fb)->bo) {
+                       vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
+                       vmw_bo_reference(vps->bo);
                } else {
                        vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
                        vmw_surface_reference(vps->surf);
@@ -390,7 +390,7 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
        }
 
        du->cursor_surface = vps->surf;
-       du->cursor_dmabuf = vps->dmabuf;
+       du->cursor_bo = vps->bo;
 
        if (vps->surf) {
                du->cursor_age = du->cursor_surface->snooper.age;
@@ -399,11 +399,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                              vps->surf->snooper.image,
                                              64, 64, hotspot_x,
                                              hotspot_y);
-       } else if (vps->dmabuf) {
-               ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
-                                              plane->state->crtc_w,
-                                              plane->state->crtc_h,
-                                              hotspot_x, hotspot_y);
+       } else if (vps->bo) {
+               ret = vmw_cursor_update_bo(dev_priv, vps->bo,
+                                          plane->state->crtc_w,
+                                          plane->state->crtc_h,
+                                          hotspot_x, hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return;
@@ -519,7 +519,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                ret = -EINVAL;
        }
 
-       if (!vmw_framebuffer_to_vfb(fb)->dmabuf)
+       if (!vmw_framebuffer_to_vfb(fb)->bo)
                surface = vmw_framebuffer_to_vfbs(fb)->surface;
 
        if (surface && !surface->snooper.image) {
@@ -687,8 +687,8 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
        if (vps->surf)
                (void) vmw_surface_reference(vps->surf);
 
-       if (vps->dmabuf)
-               (void) vmw_dmabuf_reference(vps->dmabuf);
+       if (vps->bo)
+               (void) vmw_bo_reference(vps->bo);
 
        state = &vps->base;
 
@@ -745,8 +745,8 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unreference(&vps->dmabuf);
+       if (vps->bo)
+               vmw_bo_unreference(&vps->bo);
 
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
@@ -902,12 +902,12 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 
 /**
  * vmw_kms_readback - Perform a readback from the screen system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -951,7 +951,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd2
                                           *mode_cmd,
-                                          bool is_dmabuf_proxy)
+                                          bool is_bo_proxy)
 
 {
        struct drm_device *dev = dev_priv->dev;
@@ -1019,7 +1019,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
        drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
        vfbs->surface = vmw_surface_reference(surface);
        vfbs->base.user_handle = mode_cmd->handles[0];
-       vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
+       vfbs->is_bo_proxy = is_bo_proxy;
 
        *out = &vfbs->base;
 
@@ -1038,30 +1038,30 @@ out_err1:
 }
 
 /*
- * Dmabuf framebuffer code
+ * Buffer-object framebuffer code
  */
 
-static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
 {
-       struct vmw_framebuffer_dmabuf *vfbd =
+       struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);
 
        drm_framebuffer_cleanup(framebuffer);
-       vmw_dmabuf_unreference(&vfbd->buffer);
+       vmw_bo_unreference(&vfbd->buffer);
        if (vfbd->base.user_obj)
                ttm_base_object_unref(&vfbd->base.user_obj);
 
        kfree(vfbd);
 }
 
-static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
-                                struct drm_file *file_priv,
-                                unsigned flags, unsigned color,
-                                struct drm_clip_rect *clips,
-                                unsigned num_clips)
+static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
+                                   struct drm_file *file_priv,
+                                   unsigned int flags, unsigned int color,
+                                   struct drm_clip_rect *clips,
+                                   unsigned int num_clips)
 {
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-       struct vmw_framebuffer_dmabuf *vfbd =
+       struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);
        struct drm_clip_rect norect;
        int ret, increment = 1;
@@ -1092,13 +1092,13 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
                                       true, true, NULL);
                break;
        case vmw_du_screen_object:
-               ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
-                                                 clips, NULL, num_clips,
-                                                 increment, true, NULL, NULL);
+               ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
+                                             clips, NULL, num_clips,
+                                             increment, true, NULL, NULL);
                break;
        case vmw_du_legacy:
-               ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
-                                                 clips, num_clips, increment);
+               ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
+                                             clips, num_clips, increment);
                break;
        default:
                ret = -EINVAL;
@@ -1114,23 +1114,23 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
        return ret;
 }
 
-static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
-       .destroy = vmw_framebuffer_dmabuf_destroy,
-       .dirty = vmw_framebuffer_dmabuf_dirty,
+static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
+       .destroy = vmw_framebuffer_bo_destroy,
+       .dirty = vmw_framebuffer_bo_dirty,
 };
 
 /**
- * Pin the dmabuffer in a location suitable for access by the
+ * Pin the buffer object in a location suitable for access by the
  * display system.
  */
 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct ttm_placement *placement;
        int ret;
 
-       buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+       buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
        if (!buf)
@@ -1139,12 +1139,12 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
        switch (dev_priv->active_display_unit) {
        case vmw_du_legacy:
                vmw_overlay_pause_all(dev_priv);
-               ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+               ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
                vmw_overlay_resume_all(dev_priv);
                break;
        case vmw_du_screen_object:
        case vmw_du_screen_target:
-               if (vfb->dmabuf) {
+               if (vfb->bo) {
                        if (dev_priv->capabilities & SVGA_CAP_3D) {
                                /*
                                 * Use surface DMA to get content to
@@ -1160,8 +1160,7 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
                        placement = &vmw_mob_placement;
                }
 
-               return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
-                                                  false);
+               return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
        default:
                return -EINVAL;
        }
@@ -1172,36 +1171,36 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
 {
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
 
-       buf = vfb->dmabuf ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+       buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
                vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
 
        if (WARN_ON(!buf))
                return 0;
 
-       return vmw_dmabuf_unpin(dev_priv, buf, false);
+       return vmw_bo_unpin(dev_priv, buf, false);
 }
 
 /**
- * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ * vmw_create_bo_proxy - create a proxy surface for the buffer object
  *
  * @dev: DRM device
  * @mode_cmd: parameters for the new surface
- * @dmabuf_mob: MOB backing the DMA buf
+ * @bo_mob: MOB backing the buffer object
  * @srf_out: newly created surface
  *
- * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * When the content FB is a buffer object, we create a surface as a proxy to the
  * same buffer.  This way we can do a surface copy rather than a surface DMA.
  * This is a more efficient approach.
  *
  * RETURNS:
  * 0 on success, error code otherwise
  */
-static int vmw_create_dmabuf_proxy(struct drm_device *dev,
-                                  const struct drm_mode_fb_cmd2 *mode_cmd,
-                                  struct vmw_dma_buffer *dmabuf_mob,
-                                  struct vmw_surface **srf_out)
+static int vmw_create_bo_proxy(struct drm_device *dev,
+                              const struct drm_mode_fb_cmd2 *mode_cmd,
+                              struct vmw_buffer_object *bo_mob,
+                              struct vmw_surface **srf_out)
 {
        uint32_t format;
        struct drm_vmw_size content_base_size = {0};
@@ -1258,8 +1257,8 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
        /* Reserve and switch the backing mob. */
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);
-       vmw_dmabuf_unreference(&res->backup);
-       res->backup = vmw_dmabuf_reference(dmabuf_mob);
+       vmw_bo_unreference(&res->backup);
+       res->backup = vmw_bo_reference(bo_mob);
        res->backup_offset = 0;
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1269,21 +1268,21 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
 
 
 
-static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
-                                         struct vmw_dma_buffer *dmabuf,
-                                         struct vmw_framebuffer **out,
-                                         const struct drm_mode_fb_cmd2
-                                         *mode_cmd)
+static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
+                                     struct vmw_buffer_object *bo,
+                                     struct vmw_framebuffer **out,
+                                     const struct drm_mode_fb_cmd2
+                                     *mode_cmd)
 
 {
        struct drm_device *dev = dev_priv->dev;
-       struct vmw_framebuffer_dmabuf *vfbd;
+       struct vmw_framebuffer_bo *vfbd;
        unsigned int requested_size;
        struct drm_format_name_buf format_name;
        int ret;
 
        requested_size = mode_cmd->height * mode_cmd->pitches[0];
-       if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+       if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
@@ -1312,20 +1311,20 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
        }
 
        drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
-       vfbd->base.dmabuf = true;
-       vfbd->buffer = vmw_dmabuf_reference(dmabuf);
+       vfbd->base.bo = true;
+       vfbd->buffer = vmw_bo_reference(bo);
        vfbd->base.user_handle = mode_cmd->handles[0];
        *out = &vfbd->base;
 
        ret = drm_framebuffer_init(dev, &vfbd->base.base,
-                                  &vmw_framebuffer_dmabuf_funcs);
+                                  &vmw_framebuffer_bo_funcs);
        if (ret)
                goto out_err2;
 
        return 0;
 
 out_err2:
-       vmw_dmabuf_unreference(&dmabuf);
+       vmw_bo_unreference(&bo);
        kfree(vfbd);
 out_err1:
        return ret;
@@ -1354,57 +1353,57 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
  * vmw_kms_new_framebuffer - Create a new framebuffer.
  *
  * @dev_priv: Pointer to device private struct.
- * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
+ * @bo: Pointer to buffer object to wrap the kms framebuffer around.
+ * Either @bo or @surface must be NULL.
  * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @dmabuf or @surface must be NULL.
- * @only_2d: No presents will occur to this dma buffer based framebuffer. This
- * Helps the code to do some important optimizations.
+ * Either @bo or @surface must be NULL.
+ * @only_2d: No presents will occur to this buffer-object backed framebuffer.
+ * This helps the code make some important optimizations.
  * @mode_cmd: Frame-buffer metadata.
  */
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-                       struct vmw_dma_buffer *dmabuf,
+                       struct vmw_buffer_object *bo,
                        struct vmw_surface *surface,
                        bool only_2d,
                        const struct drm_mode_fb_cmd2 *mode_cmd)
 {
        struct vmw_framebuffer *vfb = NULL;
-       bool is_dmabuf_proxy = false;
+       bool is_bo_proxy = false;
        int ret;
 
        /*
         * We cannot use the SurfaceDMA command in a non-accelerated VM,
-        * therefore, wrap the DMA buf in a surface so we can use the
+        * therefore, wrap the buffer object in a surface so we can use the
         * SurfaceCopy command.
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
-           dmabuf && only_2d &&
+           bo && only_2d &&
            mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
-               ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
-                                             dmabuf, &surface);
+               ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd,
+                                         bo, &surface);
                if (ret)
                        return ERR_PTR(ret);
 
-               is_dmabuf_proxy = true;
+               is_bo_proxy = true;
        }
 
        /* Create the new framebuffer depending one what we have */
        if (surface) {
                ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                                      mode_cmd,
-                                                     is_dmabuf_proxy);
+                                                     is_bo_proxy);
 
                /*
-                * vmw_create_dmabuf_proxy() adds a reference that is no longer
+                * vmw_create_bo_proxy() adds a reference that is no longer
                 * needed
                 */
-               if (is_dmabuf_proxy)
+               if (is_bo_proxy)
                        vmw_surface_unreference(&surface);
-       } else if (dmabuf) {
-               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
-                                                    mode_cmd);
+       } else if (bo) {
+               ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+                                                mode_cmd);
        } else {
                BUG();
        }
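
A call-site sketch for the buffer-object path (hypothetical caller; exactly
one of @bo and @surface may be non-NULL, and failure is reported via ERR_PTR):

        /* Hypothetical sketch: wrap an existing buffer object in a KMS fb. */
        static int example_create_bo_fb(struct vmw_private *dev_priv,
                                        struct vmw_buffer_object *bo,
                                        const struct drm_mode_fb_cmd2 *mode_cmd)
        {
                struct vmw_framebuffer *vfb =
                        vmw_kms_new_framebuffer(dev_priv, bo, NULL, true,
                                                mode_cmd);

                if (IS_ERR(vfb))
                        return PTR_ERR(vfb);
                /* ... use vfb->base as the struct drm_framebuffer ... */
                return 0;
        }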
@@ -1430,7 +1429,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
-       struct vmw_dma_buffer *bo = NULL;
+       struct vmw_buffer_object *bo = NULL;
        struct ttm_base_object *user_obj;
        int ret;
 
@@ -1466,7 +1465,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * End conditioned code.
         */
 
-       /* returns either a dmabuf or surface */
+       /* returns either a bo or surface */
        ret = vmw_user_lookup_handle(dev_priv, tfile,
                                     mode_cmd->handles[0],
                                     &surface, &bo);
@@ -1494,7 +1493,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
        /* vmw_user_lookup_handle takes one ref so does new_fb */
        if (bo)
-               vmw_dmabuf_unreference(&bo);
+               vmw_bo_unreference(&bo);
        if (surface)
                vmw_surface_unreference(&surface);
 
@@ -2427,7 +2426,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
  * interrupted by a signal.
  */
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit)
@@ -2459,7 +2458,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
  * Helper to be used if an error forces the caller to undo the actions of
  * vmw_kms_helper_buffer_prepare.
  */
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
 {
        if (buf)
                ttm_bo_unreserve(&buf->base);
@@ -2482,7 +2481,7 @@ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
  */
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep)
@@ -2522,7 +2521,7 @@ void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
        struct vmw_resource *res = ctx->res;
 
        vmw_kms_helper_buffer_revert(ctx->buf);
-       vmw_dmabuf_unreference(&ctx->buf);
+       vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2567,7 +2566,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
                if (ret)
                        goto out_unreserve;
 
-               ctx->buf = vmw_dmabuf_reference(res->backup);
+               ctx->buf = vmw_bo_reference(res->backup);
        }
        ret = vmw_resource_validate(res);
        if (ret)
@@ -2600,7 +2599,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                             out_fence, NULL);
 
-       vmw_dmabuf_unreference(&ctx->buf);
+       vmw_bo_unreference(&ctx->buf);
        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
index 6b7c012..ff1caed 100644 (file)
@@ -90,7 +90,7 @@ struct vmw_kms_dirty {
 #define vmw_framebuffer_to_vfbs(x) \
        container_of(x, struct vmw_framebuffer_surface, base.base)
 #define vmw_framebuffer_to_vfbd(x) \
-       container_of(x, struct vmw_framebuffer_dmabuf, base.base)
+       container_of(x, struct vmw_framebuffer_bo, base.base)
 
 /**
  * Base class for framebuffers
@@ -102,7 +102,7 @@ struct vmw_framebuffer {
        struct drm_framebuffer base;
        int (*pin)(struct vmw_framebuffer *fb);
        int (*unpin)(struct vmw_framebuffer *fb);
-       bool dmabuf;
+       bool bo;
        struct ttm_base_object *user_obj;
        uint32_t user_handle;
 };
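With the dmabuf flag renamed to bo, downcasting a generic framebuffer follows the usual container_of route. A small hedged sketch (hypothetical helper, using only the macro and fields defined in this header):

        static struct vmw_buffer_object *
        vmw_framebuffer_backing_bo(struct vmw_framebuffer *vfb)
        {
                if (!vfb->bo)   /* surface-backed framebuffer */
                        return NULL;

                return vmw_framebuffer_to_vfbd(&vfb->base)->buffer;
        }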
@@ -117,15 +117,15 @@ struct vmw_clip_rect {
 struct vmw_framebuffer_surface {
        struct vmw_framebuffer base;
        struct vmw_surface *surface;
-       struct vmw_dma_buffer *buffer;
+       struct vmw_buffer_object *buffer;
        struct list_head head;
-       bool is_dmabuf_proxy;  /* true if this is proxy surface for DMA buf */
+       bool is_bo_proxy;  /* true if this is proxy surface for a buffer object */
 };
 
 
-struct vmw_framebuffer_dmabuf {
+struct vmw_framebuffer_bo {
        struct vmw_framebuffer base;
-       struct vmw_dma_buffer *buffer;
+       struct vmw_buffer_object *buffer;
 };
 
 
@@ -161,18 +161,18 @@ struct vmw_crtc_state {
  *
  * @base DRM plane object
  * @surf Display surface for STDU
- * @dmabuf display dmabuf for SOU
+ * @bo display bo for SOU
  * @content_fb_type Used by STDU.
- * @dmabuf_size Size of the dmabuf, used by Screen Object Display Unit
+ * @bo_size Size of the bo, used by Screen Object Display Unit
  * @pinned pin count for STDU display surface
  */
 struct vmw_plane_state {
        struct drm_plane_state base;
        struct vmw_surface *surf;
-       struct vmw_dma_buffer *dmabuf;
+       struct vmw_buffer_object *bo;
 
        int content_fb_type;
-       unsigned long dmabuf_size;
+       unsigned long bo_size;
 
        int pinned;
 
@@ -209,7 +209,7 @@ struct vmw_display_unit {
        struct drm_plane cursor;
 
        struct vmw_surface *cursor_surface;
-       struct vmw_dma_buffer *cursor_dmabuf;
+       struct vmw_buffer_object *cursor_bo;
        size_t cursor_age;
 
        int cursor_x;
@@ -243,7 +243,7 @@ struct vmw_display_unit {
 
 struct vmw_validation_ctx {
        struct vmw_resource *res;
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
 };
 
 #define vmw_crtc_to_du(x) \
@@ -291,14 +291,14 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
                         struct vmw_kms_dirty *dirty);
 
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  bool interruptible,
                                  bool validate_as_mob,
                                  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
 void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
                                  struct drm_file *file_priv,
-                                 struct vmw_dma_buffer *buf,
+                                 struct vmw_buffer_object *buf,
                                  struct vmw_fence_obj **out_fence,
                                  struct drm_vmw_fence_rep __user *
                                  user_fence_rep);
@@ -316,7 +316,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
                     uint32_t num_clips);
 struct vmw_framebuffer *
 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
-                       struct vmw_dma_buffer *dmabuf,
+                       struct vmw_buffer_object *bo,
                        struct vmw_surface *surface,
                        bool only_2d,
                        const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -384,11 +384,11 @@ void vmw_du_connector_destroy_state(struct drm_connector *connector,
  */
 int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
 int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               unsigned flags, unsigned color,
-                               struct drm_clip_rect *clips,
-                               unsigned num_clips, int increment);
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           unsigned int flags, unsigned int color,
+                           struct drm_clip_rect *clips,
+                           unsigned int num_clips, int increment);
 int vmw_kms_update_proxy(struct vmw_resource *res,
                         const struct drm_clip_rect *clips,
                         unsigned num_clips,
@@ -408,14 +408,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
                                 unsigned num_clips, int inc,
                                 struct vmw_fence_obj **out_fence,
                                 struct drm_crtc *crtc);
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               struct drm_clip_rect *clips,
-                               struct drm_vmw_rect *vclips,
-                               unsigned num_clips, int increment,
-                               bool interruptible,
-                               struct vmw_fence_obj **out_fence,
-                               struct drm_crtc *crtc);
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           struct drm_clip_rect *clips,
+                           struct drm_vmw_rect *vclips,
+                           unsigned int num_clips, int increment,
+                           bool interruptible,
+                           struct vmw_fence_obj **out_fence,
+                           struct drm_crtc *crtc);
 int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         struct drm_file *file_priv,
                         struct vmw_framebuffer *vfb,
index 4a5907e..a2dd9a8 100644 (file)
@@ -547,11 +547,11 @@ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv)
 }
 
 
-int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
-                               struct vmw_framebuffer *framebuffer,
-                               unsigned flags, unsigned color,
-                               struct drm_clip_rect *clips,
-                               unsigned num_clips, int increment)
+int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
+                           struct vmw_framebuffer *framebuffer,
+                           unsigned int flags, unsigned int color,
+                           struct drm_clip_rect *clips,
+                           unsigned int num_clips, int increment)
 {
        size_t fifo_size;
        int i;
index 222c9c2..09420ef 100644 (file)
@@ -38,7 +38,7 @@
 #define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
 
 struct vmw_stream {
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        bool claimed;
        bool paused;
        struct drm_vmw_control_stream_arg saved;
@@ -94,7 +94,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
  * -ERESTARTSYS if interrupted by a signal.
  */
 static int vmw_overlay_send_put(struct vmw_private *dev_priv,
-                               struct vmw_dma_buffer *buf,
+                               struct vmw_buffer_object *buf,
                                struct drm_vmw_control_stream_arg *arg,
                                bool interruptible)
 {
@@ -225,16 +225,16 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
  * used with GMRs instead of being locked to vram.
  */
 static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
-                                  struct vmw_dma_buffer *buf,
+                                  struct vmw_buffer_object *buf,
                                   bool pin, bool inter)
 {
        if (!pin)
-               return vmw_dmabuf_unpin(dev_priv, buf, inter);
+               return vmw_bo_unpin(dev_priv, buf, inter);
 
        if (dev_priv->active_display_unit == vmw_du_legacy)
-               return vmw_dmabuf_pin_in_vram(dev_priv, buf, inter);
+               return vmw_bo_pin_in_vram(dev_priv, buf, inter);
 
-       return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf, inter);
+       return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
 }
 
 /**
@@ -278,7 +278,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
        }
 
        if (!pause) {
-               vmw_dmabuf_unreference(&stream->buf);
+               vmw_bo_unreference(&stream->buf);
                stream->paused = false;
        } else {
                stream->paused = true;
@@ -297,7 +297,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
  * -ERESTARTSYS if interrupted.
  */
 static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
-                                    struct vmw_dma_buffer *buf,
+                                    struct vmw_buffer_object *buf,
                                     struct drm_vmw_control_stream_arg *arg,
                                     bool interruptible)
 {
@@ -347,7 +347,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
        }
 
        if (stream->buf != buf)
-               stream->buf = vmw_dmabuf_reference(buf);
+               stream->buf = vmw_bo_reference(buf);
        stream->saved = *arg;
        /* stream is no longer stopped/paused */
        stream->paused = false;
@@ -466,7 +466,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
            (struct drm_vmw_control_stream_arg *)data;
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct vmw_resource *res;
        int ret;
 
@@ -484,13 +484,13 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
-       ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+       ret = vmw_user_bo_lookup(tfile, arg->handle, &buf, NULL);
        if (ret)
                goto out_unlock;
 
        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 
 out_unlock:
        mutex_unlock(&overlay->mutex);
index 6b3a942..5aaf9ac 100644 (file)
@@ -35,9 +35,9 @@
 
 #define VMW_RES_EVICT_ERR_COUNT 10
 
-struct vmw_user_dma_buffer {
+struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
-       struct vmw_dma_buffer dma;
+       struct vmw_buffer_object vbo;
 };
 
 struct vmw_bo_user_rep {
@@ -45,17 +45,18 @@ struct vmw_bo_user_rep {
        uint64_t map_handle;
 };
 
-static inline struct vmw_dma_buffer *
-vmw_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_buffer_object *
+vmw_buffer_object(struct ttm_buffer_object *bo)
 {
-       return container_of(bo, struct vmw_dma_buffer, base);
+       return container_of(bo, struct vmw_buffer_object, base);
 }
 
-static inline struct vmw_user_dma_buffer *
-vmw_user_dma_buffer(struct ttm_buffer_object *bo)
+static inline struct vmw_user_buffer_object *
+vmw_user_buffer_object(struct ttm_buffer_object *bo)
 {
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-       return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
+
+       return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
 }
 
 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
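The two inline helpers above chain the downcasts: a raw ttm_buffer_object first becomes a vmw_buffer_object through its embedded base, and only a buffer allocated on the user path (bo->destroy == vmw_user_bo_destroy, as checked later in this file) may additionally be treated as a vmw_user_buffer_object through the renamed vbo member. Sketched, with that precondition assumed:

        struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
        struct vmw_user_buffer_object *ubo = vmw_user_buffer_object(bo);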
@@ -116,7 +117,7 @@ static void vmw_resource_release(struct kref *kref)
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
        }
 
        if (likely(res->hw_destroy != NULL)) {
@@ -287,7 +288,7 @@ out_bad_resource:
 }
 
 /**
- * Helper function that looks either a surface or dmabuf.
+ * Helper function that looks up either a surface or a bo.
  *
  * The pointers pointed at by out_surf and out_buf need to be NULL.
  */
@@ -295,7 +296,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
-                          struct vmw_dma_buffer **out_buf)
+                          struct vmw_buffer_object **out_buf)
 {
        struct vmw_resource *res;
        int ret;
@@ -311,7 +312,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
        }
 
        *out_surf = NULL;
-       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+       ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
 }
 
@@ -320,14 +321,14 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
  */
 
 /**
- * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
  *
  * @dev_priv: Pointer to a struct vmw_private identifying the device.
  * @size: The requested buffer size.
  * @user: Whether this is an ordinary dma buffer or a user dma buffer.
  */
-static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
-                                 bool user)
+static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
+                             bool user)
 {
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -337,9 +338,9 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                size_t backend_size = ttm_round_pot(vmw_tt_size);
 
                struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_dma_buffer));
+                       ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
-                       ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+                       ttm_round_pot(sizeof(struct vmw_user_buffer_object));
        }
 
        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -350,36 +351,36 @@ static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                page_array_size;
 }
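As a hedged worked example of the accounting above (assuming 4 KiB pages and pointer-sized page-array entries): a 1 MiB request gives num_pages = PAGE_ALIGN(1 MiB) >> PAGE_SHIFT = 256, so roughly ttm_round_pot(256 * sizeof(void *)) = 2 KiB of page-array bookkeeping is charged on top of the rounded struct size, with a second, similarly sized array added when map_mode == vmw_dma_alloc_coherent.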
 
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
+void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 {
-       struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+       struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 
-       vmw_dma_buffer_unmap(vmw_bo);
+       vmw_buffer_object_unmap(vmw_bo);
        kfree(vmw_bo);
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 {
-       struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+       struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
 
-       vmw_dma_buffer_unmap(&vmw_user_bo->dma);
+       vmw_buffer_object_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
-                   struct vmw_dma_buffer *vmw_bo,
-                   size_t size, struct ttm_placement *placement,
-                   bool interruptible,
-                   void (*bo_free) (struct ttm_buffer_object *bo))
+int vmw_bo_init(struct vmw_private *dev_priv,
+               struct vmw_buffer_object *vmw_bo,
+               size_t size, struct ttm_placement *placement,
+               bool interruptible,
+               void (*bo_free)(struct ttm_buffer_object *bo))
 {
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
-       bool user = (bo_free == &vmw_user_dmabuf_destroy);
+       bool user = (bo_free == &vmw_user_bo_destroy);
 
-       BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
+       WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
 
-       acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
+       acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
 
        INIT_LIST_HEAD(&vmw_bo->res_list);
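The renamed initializer keeps the old contract: the caller allocates the (possibly embedded) object and vmw_bo_init frees it on failure. A minimal sketch, with dev_priv and size in scope, mirroring the pattern used later in this patch for the screen-object display path:

        struct vmw_buffer_object *vbo;
        int ret;

        vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
        if (!vbo)
                return -ENOMEM;

        ret = vmw_bo_init(dev_priv, vbo, size, &vmw_vram_ne_placement,
                          false, &vmw_bo_bo_free);
        if (ret)
                return ret;     /* vmw_bo_init already freed vbo */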
@@ -391,9 +392,9 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
        return ret;
 }
 
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+static void vmw_user_bo_release(struct ttm_base_object **p_base)
 {
-       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;
 
@@ -402,21 +403,22 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
        if (unlikely(base == NULL))
                return;
 
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
-       bo = &vmw_user_bo->dma.base;
+       bo = &vmw_user_bo->vbo.base;
        ttm_bo_unref(&bo);
 }
 
-static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
-                                           enum ttm_ref_type ref_type)
+static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
+                                       enum ttm_ref_type ref_type)
 {
-       struct vmw_user_dma_buffer *user_bo;
-       user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+       struct vmw_user_buffer_object *user_bo;
+
+       user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
 
        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
-               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                BUG();
@@ -424,7 +426,7 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
 }
 
 /**
- * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ * vmw_user_bo_alloc - Allocate a user buffer object
  *
  * @dev_priv: Pointer to a struct device private.
  * @tfile: Pointer to a struct ttm_object_file on which to register the user
@@ -432,18 +434,18 @@ static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
  * @size: Size of the dma buffer.
  * @shareable: Boolean whether the buffer is shareable with other open files.
  * @handle: Pointer to where the handle value should be assigned.
- * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
  * should be assigned.
  */
-int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
-                         struct ttm_object_file *tfile,
-                         uint32_t size,
-                         bool shareable,
-                         uint32_t *handle,
-                         struct vmw_dma_buffer **p_dma_buf,
-                         struct ttm_base_object **p_base)
+int vmw_user_bo_alloc(struct vmw_private *dev_priv,
+                     struct ttm_object_file *tfile,
+                     uint32_t size,
+                     bool shareable,
+                     uint32_t *handle,
+                     struct vmw_buffer_object **p_vbo,
+                     struct ttm_base_object **p_base)
 {
-       struct vmw_user_dma_buffer *user_bo;
+       struct vmw_user_buffer_object *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;
 
@@ -453,28 +455,28 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                return -ENOMEM;
        }
 
-       ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
-                             (dev_priv->has_mob) ?
-                             &vmw_sys_placement :
-                             &vmw_vram_sys_placement, true,
-                             &vmw_user_dmabuf_destroy);
+       ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
+                         (dev_priv->has_mob) ?
+                         &vmw_sys_placement :
+                         &vmw_vram_sys_placement, true,
+                         &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;
 
-       tmp = ttm_bo_reference(&user_bo->dma.base);
+       tmp = ttm_bo_reference(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
-                                   &vmw_user_dmabuf_release,
-                                   &vmw_user_dmabuf_ref_obj_release);
+                                   &vmw_user_bo_release,
+                                   &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }
 
-       *p_dma_buf = &user_bo->dma;
+       *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
@@ -486,21 +488,21 @@ out_no_base_object:
 }
 
 /**
- * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * vmw_user_bo_verify_access - verify access permissions on this
  * buffer object.
  *
  * @bo: Pointer to the buffer object being accessed
  * @tfile: Identifying the caller.
  */
-int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
 {
-       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct vmw_user_buffer_object *vmw_user_bo;
 
-       if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+       if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;
 
-       vmw_user_bo = vmw_user_dma_buffer(bo);
+       vmw_user_bo = vmw_user_buffer_object(bo);
 
        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
@@ -511,7 +513,7 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 }
 
 /**
- * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
  * access, idling previous GPU operations on the buffer and optionally
  * blocking it for further command submissions.
  *
@@ -521,11 +523,11 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
  *
  * A blocking grab will be automatically released when @tfile is closed.
  */
-static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
 {
-       struct ttm_buffer_object *bo = &user_bo->dma.base;
+       struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;
 
@@ -550,20 +552,20 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
-               ttm_bo_synccpu_write_release(&user_bo->dma.base);
+               ttm_bo_synccpu_write_release(&user_bo->vbo.base);
 
        return ret;
 }
 
 /**
- * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
  * and unblock command submission on the buffer if blocked.
  *
  * @handle: Handle identifying the buffer object.
  * @tfile: Identifying the caller.
  * @flags: Flags indicating the type of release.
  */
-static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+static int vmw_user_bo_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
 {
@@ -575,7 +577,7 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
 }
 
 /**
- * vmw_user_dmabuf_synccpu_release - ioctl function implementing the synccpu
+ * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
  * functionality.
  *
  * @dev: Identifies the drm device.
@@ -585,13 +587,13 @@ static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
  * This function checks the ioctl arguments for validity and calls the
  * relevant synccpu functions.
  */
-int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
 {
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
-       struct vmw_dma_buffer *dma_buf;
-       struct vmw_user_dma_buffer *user_bo;
+       struct vmw_buffer_object *vbo;
+       struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;
@@ -606,15 +608,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 
        switch (arg->op) {
        case drm_vmw_synccpu_grab:
-               ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+               ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                             &buffer_base);
                if (unlikely(ret != 0))
                        return ret;
 
-               user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
-                                      dma);
-               ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
-               vmw_dmabuf_unreference(&dma_buf);
+               user_bo = container_of(vbo, struct vmw_user_buffer_object,
+                                      vbo);
+               ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
+               vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
@@ -624,8 +626,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                }
                break;
        case drm_vmw_synccpu_release:
-               ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
-                                                     arg->flags);
+               ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
+                                                 arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
@@ -640,15 +642,15 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
+int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
-       struct vmw_dma_buffer *dma_buf;
+       struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;
 
@@ -656,27 +658,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-                                   req->size, false, &handle, &dma_buf,
-                                   NULL);
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+                               req->size, false, &handle, &vbo,
+                               NULL);
        if (unlikely(ret != 0))
-               goto out_no_dmabuf;
+               goto out_no_bo;
 
        rep->handle = handle;
-       rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
+       rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;
 
-       vmw_dmabuf_unreference(&dma_buf);
+       vmw_bo_unreference(&vbo);
 
-out_no_dmabuf:
+out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
 
        return ret;
 }
 
-int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv)
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
 {
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;
@@ -686,11 +688,11 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                                         TTM_REF_USAGE);
 }
 
-int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-                          uint32_t handle, struct vmw_dma_buffer **out,
+int vmw_user_bo_lookup(struct ttm_object_file *tfile,
+                          uint32_t handle, struct vmw_buffer_object **out,
                           struct ttm_base_object **p_base)
 {
-       struct vmw_user_dma_buffer *vmw_user_bo;
+       struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;
 
        base = ttm_base_object_lookup(tfile, handle);
@@ -707,28 +709,28 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                return -EINVAL;
        }
 
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+       vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
-       (void)ttm_bo_reference(&vmw_user_bo->dma.base);
+       (void)ttm_bo_reference(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
-       *out = &vmw_user_bo->dma;
+       *out = &vmw_user_bo->vbo;
 
        return 0;
 }
 
-int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-                             struct vmw_dma_buffer *dma_buf,
+int vmw_user_bo_reference(struct ttm_object_file *tfile,
+                             struct vmw_buffer_object *vbo,
                              uint32_t *handle)
 {
-       struct vmw_user_dma_buffer *user_bo;
+       struct vmw_user_buffer_object *user_bo;
 
-       if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+       if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;
 
-       user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+       user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 
        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
@@ -743,7 +745,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
  * @args: Pointer to a struct drm_mode_create_dumb structure
  *
  * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
  * that the arguments have a different format.
  */
 int vmw_dumb_create(struct drm_file *file_priv,
@@ -751,7 +753,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_mode_create_dumb *args)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_dma_buffer *dma_buf;
+       struct vmw_buffer_object *vbo;
        int ret;
 
        args->pitch = args->width * ((args->bpp + 7) / 8);
@@ -761,14 +763,14 @@ int vmw_dumb_create(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                return ret;
 
-       ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+       ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
-                                   &dma_buf, NULL);
+                                   &vbo, NULL);
        if (unlikely(ret != 0))
-               goto out_no_dmabuf;
+               goto out_no_bo;
 
-       vmw_dmabuf_unreference(&dma_buf);
-out_no_dmabuf:
+       vmw_bo_unreference(&vbo);
+out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
 }
@@ -788,15 +790,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
                        uint64_t *offset)
 {
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_dma_buffer *out_buf;
+       struct vmw_buffer_object *out_buf;
        int ret;
 
-       ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
+       ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;
 
        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
-       vmw_dmabuf_unreference(&out_buf);
+       vmw_bo_unreference(&out_buf);
        return 0;
 }
 
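Since vmw_dumb_create and vmw_dumb_map_offset sit behind the generic DRM dumb-buffer ioctls, the rename is invisible to user space. As a reminder, the standard driver-agnostic client sequence looks like the following (error handling omitted; the usual <drm/drm.h>, <sys/ioctl.h> and <sys/mman.h> headers assumed):

        struct drm_mode_create_dumb create = {
                .width = 640, .height = 480, .bpp = 32,
        };
        struct drm_mode_map_dumb map = { 0 };
        void *ptr;

        ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create); /* vmw_dumb_create */
        map.handle = create.handle;
        ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);    /* vmw_dumb_map_offset */
        ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, map.offset);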
@@ -829,7 +831,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
 {
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
-       struct vmw_dma_buffer *backup;
+       struct vmw_buffer_object *backup;
        int ret;
 
        if (likely(res->backup)) {
@@ -841,16 +843,16 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
        if (unlikely(!backup))
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+       ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
-                             &vmw_dmabuf_bo_free);
+                             &vmw_bo_bo_free);
        if (unlikely(ret != 0))
-               goto out_no_dmabuf;
+               goto out_no_bo;
 
        res->backup = backup;
 
-out_no_dmabuf:
+out_no_bo:
        return ret;
 }
 
@@ -919,7 +921,7 @@ out_bind_failed:
  */
 void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
-                           struct vmw_dma_buffer *new_backup,
+                           struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
 {
        struct vmw_private *dev_priv = res->dev_priv;
@@ -931,11 +933,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
-                       vmw_dmabuf_unreference(&res->backup);
+                       vmw_bo_unreference(&res->backup);
                }
 
                if (new_backup) {
-                       res->backup = vmw_dmabuf_reference(new_backup);
+                       res->backup = vmw_bo_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
@@ -1007,7 +1009,7 @@ out_no_validate:
 out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
 
        return ret;
 }
@@ -1171,7 +1173,7 @@ int vmw_resource_validate(struct vmw_resource *res)
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
-               vmw_dmabuf_unreference(&res->backup);
+               vmw_bo_unreference(&res->backup);
        }
 
        return 0;
@@ -1230,22 +1232,22 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
 {
-       struct vmw_dma_buffer *dma_buf;
+       struct vmw_buffer_object *vbo;
 
        if (mem == NULL)
                return;
 
-       if (bo->destroy != vmw_dmabuf_bo_free &&
-           bo->destroy != vmw_user_dmabuf_destroy)
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
                return;
 
-       dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+       vbo = container_of(bo, struct vmw_buffer_object, base);
 
        /*
         * Kill any cached kernel maps before move. An optimization could
         * be to do this iff source or destination memory type is VRAM.
         */
-       vmw_dma_buffer_unmap(dma_buf);
+       vmw_buffer_object_unmap(vbo);
 
        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
@@ -1254,7 +1256,7 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                val_buf.bo = bo;
                val_buf.shared = false;
 
-               list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+               list_for_each_entry_safe(res, n, &vbo->res_list, mob_head) {
 
                        if (unlikely(res->func->unbind == NULL))
                                continue;
@@ -1277,12 +1279,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
  */
 void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
 {
-       if (bo->destroy != vmw_dmabuf_bo_free &&
-           bo->destroy != vmw_user_dmabuf_destroy)
+       if (bo->destroy != vmw_bo_bo_free &&
+           bo->destroy != vmw_user_bo_destroy)
                return;
 
        /* Kill any cached kernel maps before swapout */
-       vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+       vmw_buffer_object_unmap(vmw_buffer_object(bo));
 }
 
 
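Both notify hooks rely on the same idiom: TTM buffer objects carry no explicit type tag, so the bo->destroy callback doubles as one. Extracted as a predicate (hypothetical helper name; the comparisons are exactly those used above):

        static bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
        {
                return bo->destroy == vmw_bo_bo_free ||
                       bo->destroy == vmw_user_bo_destroy;
        }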
@@ -1294,7 +1296,7 @@ void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
  * Read back cached states from the device if they exist.  This function
  * assumes binding_mutex is held.
  */
-int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
+int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
 {
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
@@ -1344,7 +1346,7 @@ int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
 void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
 {
-       struct vmw_dma_buffer *dx_query_mob;
+       struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;
 
@@ -1353,7 +1355,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
 
        mutex_lock(&dev_priv->binding_mutex);
 
-       dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
+       dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
@@ -1481,7 +1483,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
                goto out_no_reserve;
 
        if (res->pin_count == 0) {
-               struct vmw_dma_buffer *vbo = NULL;
+               struct vmw_buffer_object *vbo = NULL;
 
                if (res->backup) {
                        vbo = res->backup;
@@ -1539,7 +1541,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
 
        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
-               struct vmw_dma_buffer *vbo = res->backup;
+               struct vmw_buffer_object *vbo = res->backup;
 
                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
index 9798640..74dfd46 100644 (file)
@@ -66,7 +66,7 @@ struct vmw_kms_sou_readback_blit {
        SVGAFifoCmdBlitScreenToGMRFB body;
 };
 
-struct vmw_kms_sou_dmabuf_blit {
+struct vmw_kms_sou_bo_blit {
        uint32 header;
        SVGAFifoCmdBlitGMRFBToScreen body;
 };
@@ -83,7 +83,7 @@ struct vmw_screen_object_unit {
        struct vmw_display_unit base;
 
        unsigned long buffer_size; /**< Size of allocated buffer */
-       struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+       struct vmw_buffer_object *buffer; /**< Backing store buffer */
 
        bool defined;
 };
@@ -240,8 +240,8 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
        }
 
        if (vfb) {
-               sou->buffer = vps->dmabuf;
-               sou->buffer_size = vps->dmabuf_size;
+               sou->buffer = vps->bo;
+               sou->buffer_size = vps->bo_size;
 
                ret = vmw_sou_fifo_create(dev_priv, sou, crtc->x, crtc->y,
                                          &crtc->mode);
@@ -408,10 +408,10 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
        struct drm_crtc *crtc = plane->state->crtc ?
                plane->state->crtc : old_state->crtc;
 
-       if (vps->dmabuf)
-               vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
-       vmw_dmabuf_unreference(&vps->dmabuf);
-       vps->dmabuf_size = 0;
+       if (vps->bo)
+               vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
+       vmw_bo_unreference(&vps->bo);
+       vps->bo_size = 0;
 
        vmw_du_plane_cleanup_fb(plane, old_state);
 }
@@ -440,8 +440,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 
 
        if (!new_fb) {
-               vmw_dmabuf_unreference(&vps->dmabuf);
-               vps->dmabuf_size = 0;
+               vmw_bo_unreference(&vps->bo);
+               vps->bo_size = 0;
 
                return 0;
        }
@@ -449,22 +449,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
        size = new_state->crtc_w * new_state->crtc_h * 4;
        dev_priv = vmw_priv(crtc->dev);
 
-       if (vps->dmabuf) {
-               if (vps->dmabuf_size == size) {
+       if (vps->bo) {
+               if (vps->bo_size == size) {
                        /*
                         * Note that this might temporarily up the pin-count
                         * to 2, until cleanup_fb() is called.
                         */
-                       return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+                       return vmw_bo_pin_in_vram(dev_priv, vps->bo,
                                                      true);
                }
 
-               vmw_dmabuf_unreference(&vps->dmabuf);
-               vps->dmabuf_size = 0;
+               vmw_bo_unreference(&vps->bo);
+               vps->bo_size = 0;
        }
 
-       vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
-       if (!vps->dmabuf)
+       vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL);
+       if (!vps->bo)
                return -ENOMEM;
 
        vmw_svga_enable(dev_priv);
@@ -473,22 +473,22 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
         * resume the overlays, this is preferred to failing to alloc.
         */
        vmw_overlay_pause_all(dev_priv);
-       ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
+       ret = vmw_bo_init(dev_priv, vps->bo, size,
                              &vmw_vram_ne_placement,
-                             false, &vmw_dmabuf_bo_free);
+                             false, &vmw_bo_bo_free);
        vmw_overlay_resume_all(dev_priv);
        if (ret) {
-               vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
+               vps->bo = NULL; /* vmw_bo_init frees on error */
                return ret;
        }
 
-       vps->dmabuf_size = size;
+       vps->bo_size = size;
 
        /*
         * TTM already thinks the buffer is pinned, but make sure the
         * pin_count is upped.
         */
-       return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
+       return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
 }
 
 
@@ -512,10 +512,10 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
                vclips.w = crtc->mode.hdisplay;
                vclips.h = crtc->mode.vdisplay;
 
-               if (vfb->dmabuf)
-                       ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
-                                                         &vclips, 1, 1, true,
-                                                         &fence, crtc);
+               if (vfb->bo)
+                       ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
+                                                     &vclips, 1, 1, true,
+                                                     &fence, crtc);
                else
                        ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
                                                           &vclips, NULL, 0, 0,
@@ -775,11 +775,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
        return 0;
 }
 
-static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv,
+static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
                                  struct vmw_framebuffer *framebuffer)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+       struct vmw_buffer_object *buf =
+               container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        int depth = framebuffer->base.format->depth;
        struct {
@@ -970,13 +970,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_sou_dmabuf_fifo_commit - Callback to submit a set of readback clips.
+ * vmw_sou_bo_fifo_commit - Callback to submit a set of readback clips.
  *
  * @dirty: The closure structure.
  *
  * Commits a previously built command buffer of readback clips.
  */
-static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
        if (!dirty->num_hits) {
                vmw_fifo_commit(dirty->dev_priv, 0);
@@ -984,20 +984,20 @@ static void vmw_sou_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
        }
 
        vmw_fifo_commit(dirty->dev_priv,
-                       sizeof(struct vmw_kms_sou_dmabuf_blit) *
+                       sizeof(struct vmw_kms_sou_bo_blit) *
                        dirty->num_hits);
 }
 
 /**
- * vmw_sou_dmabuf_clip - Callback to encode a readback cliprect.
+ * vmw_sou_bo_clip - Callback to encode a readback cliprect.
  *
  * @dirty: The closure structure
  *
  * Encodes a BLIT_GMRFB_TO_SCREEN cliprect.
  */
-static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
 {
-       struct vmw_kms_sou_dmabuf_blit *blit = dirty->cmd;
+       struct vmw_kms_sou_bo_blit *blit = dirty->cmd;
 
        blit += dirty->num_hits;
        blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
@@ -1012,10 +1012,10 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }
 
 /**
- * vmw_kms_do_dmabuf_dirty - Dirty part of a dma-buffer backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
  *
  * @dev_priv: Pointer to the device private structure.
- * @framebuffer: Pointer to the dma-buffer backed framebuffer.
+ * @framebuffer: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -1025,12 +1025,12 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
  * @out_fence: If non-NULL, will return a ref-counted pointer to a
  * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  * case the device has already synchronized.
- * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
+ * @crtc: If crtc is passed, perform bo dirty on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
  */
-int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
                                struct vmw_framebuffer *framebuffer,
                                struct drm_clip_rect *clips,
                                struct drm_vmw_rect *vclips,
@@ -1039,8 +1039,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
                                struct vmw_fence_obj **out_fence,
                                struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(framebuffer, struct vmw_framebuffer_dmabuf,
+       struct vmw_buffer_object *buf =
+               container_of(framebuffer, struct vmw_framebuffer_bo,
                             base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;
@@ -1050,14 +1050,14 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       ret = do_dmabuf_define_gmrfb(dev_priv, framebuffer);
+       ret = do_bo_define_gmrfb(dev_priv, framebuffer);
        if (unlikely(ret != 0))
                goto out_revert;
 
        dirty.crtc = crtc;
-       dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
-       dirty.clip = vmw_sou_dmabuf_clip;
-       dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
+       dirty.fifo_commit = vmw_sou_bo_fifo_commit;
+       dirty.clip = vmw_sou_bo_clip;
+       dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_bo_blit) *
                num_clips;
        ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
                                   0, 0, num_clips, increment, &dirty);
@@ -1116,12 +1116,12 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
 
 /**
  * vmw_kms_sou_readback - Perform a readback from the screen object system to
- * a dma-buffer backed framebuffer.
+ * a buffer-object backed framebuffer.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm_file identifying the caller.
  * Must be set to NULL if @user_fence_rep is NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @user_fence_rep: User-space provided structure for fence information.
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
@@ -1139,8 +1139,8 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
                         uint32_t num_clips,
                         struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+       struct vmw_buffer_object *buf =
+               container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_kms_dirty dirty;
        int ret;
 
@@ -1149,7 +1149,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       ret = do_dmabuf_define_gmrfb(dev_priv, vfb);
+       ret = do_bo_define_gmrfb(dev_priv, vfb);
        if (unlikely(ret != 0))
                goto out_revert;
 
index 73b8e9a..f6c939f 100644 (file)
@@ -159,7 +159,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              SVGA3dShaderType type,
                              uint8_t num_input_sig,
                              uint8_t num_output_sig,
-                             struct vmw_dma_buffer *byte_code,
+                             struct vmw_buffer_object *byte_code,
                              void (*res_free) (struct vmw_resource *res))
 {
        struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -178,7 +178,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
        res->backup_size = size;
        if (byte_code) {
-               res->backup = vmw_dmabuf_reference(byte_code);
+               res->backup = vmw_bo_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
@@ -723,7 +723,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 }
 
 static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
-                                struct vmw_dma_buffer *buffer,
+                                struct vmw_buffer_object *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
@@ -801,7 +801,7 @@ out:
 
 
 static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
-                                            struct vmw_dma_buffer *buffer,
+                                            struct vmw_buffer_object *buffer,
                                             size_t shader_size,
                                             size_t offset,
                                             SVGA3dShaderType shader_type)
@@ -862,12 +862,12 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_dma_buffer *buffer = NULL;
+       struct vmw_buffer_object *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;
 
        if (buffer_handle != SVGA3D_INVALID_ID) {
-               ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
+               ret = vmw_user_bo_lookup(tfile, buffer_handle,
                                             &buffer, NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
@@ -906,7 +906,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 
        ttm_read_unlock(&dev_priv->reservation_sem);
 out_bad_arg:
-       vmw_dmabuf_unreference(&buffer);
+       vmw_bo_unreference(&buffer);
        return ret;
 }
 
@@ -983,7 +983,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct list_head *list)
 {
        struct ttm_operation_ctx ctx = { false, true };
-       struct vmw_dma_buffer *buf;
+       struct vmw_buffer_object *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
@@ -997,8 +997,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
        if (unlikely(!buf))
                return -ENOMEM;
 
-       ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
-                             true, vmw_dmabuf_bo_free);
+       ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+                             true, vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out;
 
@@ -1031,7 +1031,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
                                 res, list);
        vmw_resource_unreference(&res);
 no_reserve:
-       vmw_dmabuf_unreference(&buf);
+       vmw_bo_unreference(&buf);
 out:
        return ret;
 }
index 152e96c..537df90 100644 (file)
@@ -44,7 +44,7 @@
 enum stdu_content_type {
        SAME_AS_DISPLAY = 0,
        SEPARATE_SURFACE,
-       SEPARATE_DMA
+       SEPARATE_BO
 };
 
 /**
@@ -58,7 +58,7 @@ enum stdu_content_type {
  * @bottom: Bottom side of bounding box.
  * @fb_left: Left side of the framebuffer/content bounding box
  * @fb_top: Top of the framebuffer/content bounding box
- * @buf: DMA buffer when DMA-ing between buffer and screen targets.
+ * @buf: buffer object when DMA-ing between buffer and screen targets.
  * @sid: Surface ID when copying between surface and screen targets.
  */
 struct vmw_stdu_dirty {
@@ -68,7 +68,7 @@ struct vmw_stdu_dirty {
        s32 fb_left, fb_top;
        u32 pitch;
        union {
-               struct vmw_dma_buffer *buf;
+               struct vmw_buffer_object *buf;
                u32 sid;
        };
 };
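The buf/sid union is discriminated by the display unit's content type: buffer-object transfers (SEPARATE_BO) fill in @buf, while surface copies fill in @sid. Sketched below (names from this patch; surrounding setup elided):

        struct vmw_stdu_dirty ddirty;

        if (stdu->content_fb_type == SEPARATE_BO)
                ddirty.buf = buf;  /* DMA between bo and screen target */
        else
                ddirty.sid = sid;  /* copy between surface and screen target */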
@@ -508,14 +508,14 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
 
 
 /**
- * vmw_stdu_dmabuf_clip - Callback to encode a suface DMA command cliprect
+ * vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
  *
  * @dirty: The closure structure.
  *
  * Encodes a surface DMA command cliprect and updates the bounding box
  * for the DMA.
  */
-static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -543,14 +543,14 @@ static void vmw_stdu_dmabuf_clip(struct vmw_kms_dirty *dirty)
 }
 
 /**
- * vmw_stdu_dmabuf_fifo_commit - Callback to fill in and submit a DMA command.
+ * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
  *
  * @dirty: The closure structure.
  *
  * Fills in the missing fields in a DMA command, and optionally encodes
  * a screen target update command, depending on transfer direction.
  */
-static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -594,13 +594,13 @@ static void vmw_stdu_dmabuf_fifo_commit(struct vmw_kms_dirty *dirty)
 
 
 /**
- * vmw_stdu_dmabuf_cpu_clip - Callback to encode a CPU blit
+ * vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
  *
  * @dirty: The closure structure.
  *
  * This function calculates the bounding box for all the incoming clips.
  */
-static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_clip(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -624,14 +624,14 @@ static void vmw_stdu_dmabuf_cpu_clip(struct vmw_kms_dirty *dirty)
 
 
 /**
- * vmw_stdu_dmabuf_cpu_commit - Callback to do a CPU blit from DMAbuf
+ * vmw_stdu_bo_cpu_commit - Callback to do a CPU blit from buffer object
  *
  * @dirty: The closure structure.
  *
  * For the special case when we cannot create a proxy surface in a
  * 2D VM, we have to do a CPU blit ourselves.
  */
-static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
+static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
 {
        struct vmw_stdu_dirty *ddirty =
                container_of(dirty, struct vmw_stdu_dirty, base);
@@ -652,7 +652,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
        if (width == 0 || height == 0)
                return;
 
-       /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+       /* Assume we are blitting from Guest (bo) to Host (display_srf) */
        dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
        dst_bo = &stdu->display_srf->res.backup->base;
        dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
@@ -712,13 +712,13 @@ out_cleanup:
 }
 
 /**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a dma-buffer backed
+ * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
  * framebuffer and the screen target system.
  *
  * @dev_priv: Pointer to the device private structure.
  * @file_priv: Pointer to a struct drm-file identifying the caller. May be
  * set to NULL, but then @user_fence_rep must also be set to NULL.
- * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @vfb: Pointer to the buffer-object backed framebuffer.
  * @clips: Array of clip rects. Either @clips or @vclips must be NULL.
  * @vclips: Alternate array of clip rects. Either @clips or @vclips must
  * be NULL.
@@ -747,8 +747,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
                     bool interruptible,
                     struct drm_crtc *crtc)
 {
-       struct vmw_dma_buffer *buf =
-               container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
+       struct vmw_buffer_object *buf =
+               container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
        struct vmw_stdu_dirty ddirty;
        int ret;
        bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
@@ -770,8 +770,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
        ddirty.fb_left = ddirty.fb_top = S32_MAX;
        ddirty.pitch = vfb->base.pitches[0];
        ddirty.buf = buf;
-       ddirty.base.fifo_commit = vmw_stdu_dmabuf_fifo_commit;
-       ddirty.base.clip = vmw_stdu_dmabuf_clip;
+       ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
+       ddirty.base.clip = vmw_stdu_bo_clip;
        ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
                num_clips * sizeof(SVGA3dCopyBox) +
                sizeof(SVGA3dCmdSurfaceDMASuffix);
@@ -780,8 +780,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 
        if (cpu_blit) {
-               ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
-               ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
+               ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+               ddirty.base.clip = vmw_stdu_bo_cpu_clip;
                ddirty.base.fifo_reserve_size = 0;
        }
 
@@ -927,7 +927,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       if (vfbs->is_dmabuf_proxy) {
+       if (vfbs->is_bo_proxy) {
                ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
                if (ret)
                        goto out_finish;
@@ -1075,7 +1075,7 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
  * @new_state: info on the new plane state, including the FB
  *
  * This function allocates a new display surface if the content is
- * backed by a DMA.  The display surface is pinned here, and it'll
+ * backed by a buffer object.  The display surface is pinned here, and it'll
  * be unpinned in .cleanup_fb()
  *
  * Returns 0 on success
@@ -1105,13 +1105,13 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
        }
 
        vfb = vmw_framebuffer_to_vfb(new_fb);
-       new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+       new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
 
        if (new_vfbs && new_vfbs->surface->base_size.width == hdisplay &&
            new_vfbs->surface->base_size.height == vdisplay)
                new_content_type = SAME_AS_DISPLAY;
-       else if (vfb->dmabuf)
-               new_content_type = SEPARATE_DMA;
+       else if (vfb->bo)
+               new_content_type = SEPARATE_BO;
        else
                new_content_type = SEPARATE_SURFACE;
 
@@ -1124,10 +1124,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
                display_base_size.depth  = 1;
 
                /*
-                * If content buffer is a DMA buf, then we have to construct
-                * surface info
+                * If content buffer is a buffer object, then we have to
+                * construct surface info
                 */
-               if (new_content_type == SEPARATE_DMA) {
+               if (new_content_type == SEPARATE_BO) {
 
                        switch (new_fb->format->cpp[0]*8) {
                        case 32:
@@ -1212,12 +1212,12 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
        vps->content_fb_type = new_content_type;
 
        /*
-        * This should only happen if the DMA buf is too large to create a
+        * This should only happen if the buffer object is too large to create a
         * proxy surface for.
-        * If we are a 2D VM with a DMA buffer then we have to use CPU blit
+        * If we are a 2D VM with a buffer object then we have to use CPU blit
         * so cache these mappings
         */
-       if (vps->content_fb_type == SEPARATE_DMA &&
+       if (vps->content_fb_type == SEPARATE_BO &&
            !(dev_priv->capabilities & SVGA_CAP_3D))
                vps->cpp = new_fb->pitches[0] / new_fb->width;
 
@@ -1276,7 +1276,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
                if (ret)
                        DRM_ERROR("Failed to bind surface to STDU.\n");
 
-               if (vfb->dmabuf)
+               if (vfb->bo)
                        ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
                                               &vclips, 1, 1, true, false,
                                               crtc);
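
A hedged sketch of how the renamed clip/commit callbacks are driven. The driving loop lives in vmw_kms_helper_dirty(), which this patch does not touch; the unit_x1..unit_y2 field names are assumptions based on struct vmw_kms_dirty, and the real helper additionally reserves FIFO space and skips the commit when no clips hit the unit:

/* Hedged sketch, not driver code: approximates the loop that drives the
 * callbacks wired up above (dirty->clip per cliprect, dirty->fifo_commit
 * once at the end).
 */
static void example_drive_dirty(struct vmw_kms_dirty *dirty,
				const struct drm_clip_rect *clips,
				u32 num_clips)
{
	u32 i;

	for (i = 0; i < num_clips; i++) {
		dirty->unit_x1 = clips[i].x1;
		dirty->unit_y1 = clips[i].y1;
		dirty->unit_x2 = clips[i].x2;
		dirty->unit_y2 = clips[i].y2;
		dirty->clip(dirty);		/* e.g. vmw_stdu_bo_clip() */
	}
	dirty->fifo_commit(dirty);		/* e.g. vmw_stdu_bo_fifo_commit() */
}
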
index b236c48..2b2e8aa 100644 (file)
@@ -842,12 +842,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        if (dev_priv->has_mob && req->shareable) {
                uint32_t backup_handle;
 
-               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                           res->backup_size,
-                                           true,
-                                           &backup_handle,
-                                           &res->backup,
-                                           &user_srf->backup_base);
+               ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                       res->backup_size,
+                                       true,
+                                       &backup_handle,
+                                       &res->backup,
+                                       &user_srf->backup_base);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
@@ -1317,14 +1317,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 
 
        if (req->buffer_handle != SVGA3D_INVALID_ID) {
-               ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-                                            &res->backup,
-                                            &user_srf->backup_base);
+               ret = vmw_user_bo_lookup(tfile, req->buffer_handle,
+                                        &res->backup,
+                                        &user_srf->backup_base);
                if (ret == 0) {
                        if (res->backup->base.num_pages * PAGE_SIZE <
                            res->backup_size) {
                                DRM_ERROR("Surface backup buffer is too small.\n");
-                               vmw_dmabuf_unreference(&res->backup);
+                               vmw_bo_unreference(&res->backup);
                                ret = -EINVAL;
                                goto out_unlock;
                        } else {
@@ -1332,13 +1332,13 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
                        }
                }
        } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
-               ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
-                                           res->backup_size,
-                                           req->drm_surface_flags &
-                                           drm_vmw_surface_flag_shareable,
-                                           &backup_handle,
-                                           &res->backup,
-                                           &user_srf->backup_base);
+               ret = vmw_user_bo_alloc(dev_priv, tfile,
+                                       res->backup_size,
+                                       req->drm_surface_flags &
+                                       drm_vmw_surface_flag_shareable,
+                                       &backup_handle,
+                                       &res->backup,
+                                       &user_srf->backup_base);
 
        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&res);
@@ -1414,8 +1414,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
-       ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
-                                       &backup_handle);
+       ret = vmw_user_bo_reference(tfile, srf->res.backup, &backup_handle);
        mutex_unlock(&dev_priv->cmdbuf_mutex);
 
        if (unlikely(ret != 0)) {
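
The size check in the surface hunk above is worth spelling out as a pattern. The helper below is hypothetical and only restates the check and the unreference-on-failure taken from this patch:

/* Hypothetical helper: a user-supplied backup bo must cover
 * res->backup_size, otherwise the reference taken by
 * vmw_user_bo_lookup() is dropped again via vmw_bo_unreference().
 */
static int example_validate_backup(struct vmw_resource *res)
{
	if (res->backup->base.num_pages * PAGE_SIZE < res->backup_size) {
		DRM_ERROR("Surface backup buffer is too small.\n");
		vmw_bo_unreference(&res->backup);
		return -EINVAL;
	}
	return 0;
}
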
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
new file mode 100644 (file)
index 0000000..0931f43
--- /dev/null
@@ -0,0 +1,887 @@
+/**************************************************************************
+ *
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_page_alloc.h>
+
+static const struct ttm_place vram_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place vram_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place sys_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place sys_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place gmr_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place gmr_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+static const struct ttm_place mob_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+};
+
+static const struct ttm_place mob_ne_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_placement = {
+       .num_placement = 1,
+       .placement = &vram_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &vram_placement_flags
+};
+
+static const struct ttm_place vram_gmr_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }
+};
+
+static const struct ttm_place gmr_vram_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }
+};
+
+struct ttm_placement vmw_vram_gmr_placement = {
+       .num_placement = 2,
+       .placement = vram_gmr_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &gmr_placement_flags
+};
+
+static const struct ttm_place vram_gmr_ne_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED |
+                        TTM_PL_FLAG_NO_EVICT
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED |
+                        TTM_PL_FLAG_NO_EVICT
+       }
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+       .num_placement = 2,
+       .placement = vram_gmr_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &gmr_ne_placement_flags
+};
+
+struct ttm_placement vmw_vram_sys_placement = {
+       .num_placement = 1,
+       .placement = &vram_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_vram_ne_placement = {
+       .num_placement = 1,
+       .placement = &vram_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &vram_ne_placement_flags
+};
+
+struct ttm_placement vmw_sys_placement = {
+       .num_placement = 1,
+       .placement = &sys_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_sys_ne_placement = {
+       .num_placement = 1,
+       .placement = &sys_ne_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_ne_placement_flags
+};
+
+static const struct ttm_place evictable_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+       }
+};
+
+static const struct ttm_place nonfixed_placement_flags[] = {
+       {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+       }, {
+               .fpfn = 0,
+               .lpfn = 0,
+               .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+       }
+};
+
+struct ttm_placement vmw_evictable_placement = {
+       .num_placement = 4,
+       .placement = evictable_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 2,
+       .placement = &gmr_placement_flags,
+       .busy_placement = gmr_vram_placement_flags
+};
+
+struct ttm_placement vmw_mob_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 1,
+       .placement = &mob_placement_flags,
+       .busy_placement = &mob_placement_flags
+};
+
+struct ttm_placement vmw_mob_ne_placement = {
+       .num_placement = 1,
+       .num_busy_placement = 1,
+       .placement = &mob_ne_placement_flags,
+       .busy_placement = &mob_ne_placement_flags
+};
+
+struct ttm_placement vmw_nonfixed_placement = {
+       .num_placement = 3,
+       .placement = nonfixed_placement_flags,
+       .num_busy_placement = 1,
+       .busy_placement = &sys_placement_flags
+};
+
+struct vmw_ttm_tt {
+       struct ttm_dma_tt dma_ttm;
+       struct vmw_private *dev_priv;
+       int gmr_id;
+       struct vmw_mob *mob;
+       int mem_type;
+       struct sg_table sgt;
+       struct vmw_sg_table vsgt;
+       uint64_t sg_alloc_size;
+       bool mapped;
+};
+
+const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
+
+/**
+ * Helper functions to advance a struct vmw_piter iterator.
+ *
+ * @viter: Pointer to the iterator.
+ *
+ * These functions return false if past the end of the list,
+ * true otherwise. Functions are selected depending on the current
+ * DMA mapping mode.
+ */
+static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
+{
+       return ++(viter->i) < viter->num_pages;
+}
+
+static bool __vmw_piter_sg_next(struct vmw_piter *viter)
+{
+       return __sg_page_iter_next(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return a pointer to the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return a pointer to the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
+{
+       return viter->pages[viter->i];
+}
+
+static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
+{
+       return sg_page_iter_page(&viter->iter);
+}
+
+
+/**
+ * Helper functions to return the DMA address of the current page.
+ *
+ * @viter: Pointer to the iterator
+ *
+ * These functions return the DMA address of the page currently
+ * pointed to by @viter. Functions are selected depending on the
+ * current mapping mode.
+ */
+static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
+{
+       return page_to_phys(viter->pages[viter->i]);
+}
+
+static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
+{
+       return viter->addrs[viter->i];
+}
+
+static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
+{
+       return sg_page_iter_dma_address(&viter->iter);
+}
+
+
+/**
+ * vmw_piter_start - Initialize a struct vmw_piter.
+ *
+ * @viter: Pointer to the iterator to initialize
+ * @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ *
+ * Note that we're following the convention of __sg_page_iter_start, so that
+ * the iterator doesn't point to a valid page after initialization; it has
+ * to be advanced one step first.
+ */
+void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
+                    unsigned long p_offset)
+{
+       viter->i = p_offset - 1;
+       viter->num_pages = vsgt->num_pages;
+       switch (vsgt->mode) {
+       case vmw_dma_phys:
+               viter->next = &__vmw_piter_non_sg_next;
+               viter->dma_address = &__vmw_piter_phys_addr;
+               viter->page = &__vmw_piter_non_sg_page;
+               viter->pages = vsgt->pages;
+               break;
+       case vmw_dma_alloc_coherent:
+               viter->next = &__vmw_piter_non_sg_next;
+               viter->dma_address = &__vmw_piter_dma_addr;
+               viter->page = &__vmw_piter_non_sg_page;
+               viter->addrs = vsgt->addrs;
+               viter->pages = vsgt->pages;
+               break;
+       case vmw_dma_map_populate:
+       case vmw_dma_map_bind:
+               viter->next = &__vmw_piter_sg_next;
+               viter->dma_address = &__vmw_piter_sg_addr;
+               viter->page = &__vmw_piter_sg_page;
+               __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+                                    vsgt->sgt->orig_nents, p_offset);
+               break;
+       default:
+               BUG();
+       }
+}
+
+/**
+ * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
+ * TTM pages
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
+ */
+static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
+{
+       struct device *dev = vmw_tt->dev_priv->dev->dev;
+
+       dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
+               DMA_BIDIRECTIONAL);
+       vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
+}
+
+/**
+ * vmw_ttm_map_for_dma - map TTM pages to get device addresses
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * This function is used to get device addresses from the kernel DMA layer.
+ * However, it's violating the DMA API in that when this operation has been
+ * performed, it's illegal for the CPU to write to the pages without first
+ * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
+ * therefore only legal to call this function if we know that the function
+ * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
+ * a CPU write buffer flush.
+ */
+static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
+{
+       struct device *dev = vmw_tt->dev_priv->dev->dev;
+       int ret;
+
+       ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
+                        DMA_BIDIRECTIONAL);
+       if (unlikely(ret == 0))
+               return -ENOMEM;
+
+       vmw_tt->sgt.nents = ret;
+
+       return 0;
+}
+
+/**
+ * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Select the correct mapping mode and make sure the TTM pages are
+ * visible to the device. Allocate storage for the device mappings.
+ * If a mapping has already been performed, indicated by the storage
+ * pointer being non-NULL, the function returns success.
+ */
+static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
+{
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+       struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = true,
+               .no_wait_gpu = false
+       };
+       struct vmw_piter iter;
+       dma_addr_t old;
+       int ret = 0;
+       static size_t sgl_size;
+       static size_t sgt_size;
+
+       if (vmw_tt->mapped)
+               return 0;
+
+       vsgt->mode = dev_priv->map_mode;
+       vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
+       vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+       vsgt->addrs = vmw_tt->dma_ttm.dma_address;
+       vsgt->sgt = &vmw_tt->sgt;
+
+       switch (dev_priv->map_mode) {
+       case vmw_dma_map_bind:
+       case vmw_dma_map_populate:
+               if (unlikely(!sgl_size)) {
+                       sgl_size = ttm_round_pot(sizeof(struct scatterlist));
+                       sgt_size = ttm_round_pot(sizeof(struct sg_table));
+               }
+               vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
+               ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, &ctx);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
+                                               vsgt->num_pages, 0,
+                                               (unsigned long)
+                                               vsgt->num_pages << PAGE_SHIFT,
+                                               GFP_KERNEL);
+               if (unlikely(ret != 0))
+                       goto out_sg_alloc_fail;
+
+               if (vsgt->num_pages > vmw_tt->sgt.nents) {
+                       uint64_t over_alloc =
+                               sgl_size * (vsgt->num_pages -
+                                           vmw_tt->sgt.nents);
+
+                       ttm_mem_global_free(glob, over_alloc);
+                       vmw_tt->sg_alloc_size -= over_alloc;
+               }
+
+               ret = vmw_ttm_map_for_dma(vmw_tt);
+               if (unlikely(ret != 0))
+                       goto out_map_fail;
+
+               break;
+       default:
+               break;
+       }
+
+       old = ~((dma_addr_t) 0);
+       vmw_tt->vsgt.num_regions = 0;
+       for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
+               dma_addr_t cur = vmw_piter_dma_addr(&iter);
+
+               if (cur != old + PAGE_SIZE)
+                       vmw_tt->vsgt.num_regions++;
+               old = cur;
+       }
+
+       vmw_tt->mapped = true;
+       return 0;
+
+out_map_fail:
+       sg_free_table(vmw_tt->vsgt.sgt);
+       vmw_tt->vsgt.sgt = NULL;
+out_sg_alloc_fail:
+       ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
+       return ret;
+}
+
+/**
+ * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
+ *
+ * @vmw_tt: Pointer to a struct vmw_ttm_tt
+ *
+ * Tear down any previously set up device DMA mappings and free
+ * any storage space allocated for them. If there are no mappings set up,
+ * this function is a NOP.
+ */
+static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
+{
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+
+       if (!vmw_tt->vsgt.sgt)
+               return;
+
+       switch (dev_priv->map_mode) {
+       case vmw_dma_map_bind:
+       case vmw_dma_map_populate:
+               vmw_ttm_unmap_from_dma(vmw_tt);
+               sg_free_table(vmw_tt->vsgt.sgt);
+               vmw_tt->vsgt.sgt = NULL;
+               ttm_mem_global_free(vmw_mem_glob(dev_priv),
+                                   vmw_tt->sg_alloc_size);
+               break;
+       default:
+               break;
+       }
+       vmw_tt->mapped = false;
+}
+
+
+/**
+ * vmw_bo_map_dma - Make sure buffer object pages are visible to the device
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_map_dma that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_tt as its argument.
+ * Note that the buffer object must be either pinned or reserved before
+ * calling this function.
+ */
+int vmw_bo_map_dma(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       return vmw_ttm_map_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_unmap_dma - Tear down any device mappings for buffer object pages
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Wrapper around vmw_ttm_unmap_dma that takes a TTM buffer object pointer
+ * instead of a pointer to a struct vmw_ttm_tt as its argument.
+ */
+void vmw_bo_unmap_dma(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       vmw_ttm_unmap_dma(vmw_tt);
+}
+
+
+/**
+ * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
+ * TTM buffer object
+ *
+ * @bo: Pointer to a struct ttm_buffer_object
+ *
+ * Returns a pointer to a struct vmw_sg_table object. The object must
+ * not be freed by the caller.
+ * Note that for the device addresses to be valid, the buffer object must
+ * either be reserved or pinned.
+ */
+const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       return &vmw_tt->vsgt;
+}
+
+
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       int ret;
+
+       ret = vmw_ttm_map_dma(vmw_be);
+       if (unlikely(ret != 0))
+               return ret;
+
+       vmw_be->gmr_id = bo_mem->start;
+       vmw_be->mem_type = bo_mem->mem_type;
+
+       switch (bo_mem->mem_type) {
+       case VMW_PL_GMR:
+               return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
+                                   ttm->num_pages, vmw_be->gmr_id);
+       case VMW_PL_MOB:
+               if (unlikely(vmw_be->mob == NULL)) {
+                       vmw_be->mob =
+                               vmw_mob_create(ttm->num_pages);
+                       if (unlikely(vmw_be->mob == NULL))
+                               return -ENOMEM;
+               }
+
+               return vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
+                                   &vmw_be->vsgt, ttm->num_pages,
+                                   vmw_be->gmr_id);
+       default:
+               BUG();
+       }
+       return 0;
+}
+
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
+{
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       switch (vmw_be->mem_type) {
+       case VMW_PL_GMR:
+               vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
+               break;
+       case VMW_PL_MOB:
+               vmw_mob_unbind(vmw_be->dev_priv, vmw_be->mob);
+               break;
+       default:
+               BUG();
+       }
+
+       if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
+               vmw_ttm_unmap_dma(vmw_be);
+
+       return 0;
+}
+
+
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
+{
+       struct vmw_ttm_tt *vmw_be =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+
+       vmw_ttm_unmap_dma(vmw_be);
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+               ttm_dma_tt_fini(&vmw_be->dma_ttm);
+       else
+               ttm_tt_fini(ttm);
+
+       if (vmw_be->mob)
+               vmw_mob_destroy(vmw_be->mob);
+
+       kfree(vmw_be);
+}
+
+
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+{
+       struct vmw_ttm_tt *vmw_tt =
+               container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+       int ret;
+
+       if (ttm->state != tt_unpopulated)
+               return 0;
+
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+               size_t size =
+                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+               ret = ttm_mem_global_alloc(glob, size, ctx);
+               if (unlikely(ret != 0))
+                       return ret;
+
+               ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+                                       ctx);
+               if (unlikely(ret != 0))
+                       ttm_mem_global_free(glob, size);
+       } else
+               ret = ttm_pool_populate(ttm, ctx);
+
+       return ret;
+}
+
+static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
+{
+       struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
+                                                dma_ttm.ttm);
+       struct vmw_private *dev_priv = vmw_tt->dev_priv;
+       struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
+
+
+       if (vmw_tt->mob) {
+               vmw_mob_destroy(vmw_tt->mob);
+               vmw_tt->mob = NULL;
+       }
+
+       vmw_ttm_unmap_dma(vmw_tt);
+       if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
+               size_t size =
+                       ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
+
+               ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+               ttm_mem_global_free(glob, size);
+       } else
+               ttm_pool_unpopulate(ttm);
+}
+
+static struct ttm_backend_func vmw_ttm_func = {
+       .bind = vmw_ttm_bind,
+       .unbind = vmw_ttm_unbind,
+       .destroy = vmw_ttm_destroy,
+};
+
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
+                                       uint32_t page_flags)
+{
+       struct vmw_ttm_tt *vmw_be;
+       int ret;
+
+       vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
+       if (!vmw_be)
+               return NULL;
+
+       vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
+       vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
+       vmw_be->mob = NULL;
+
+       if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
+               ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+       else
+               ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+       if (unlikely(ret != 0))
+               goto out_no_init;
+
+       return &vmw_be->dma_ttm.ttm;
+out_no_init:
+       kfree(vmw_be);
+       return NULL;
+}
+
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+{
+       return 0;
+}
+
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+                     struct ttm_mem_type_manager *man)
+{
+       switch (type) {
+       case TTM_PL_SYSTEM:
+               /* System memory */
+
+               man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case TTM_PL_VRAM:
+               /* "On-card" video ram */
+               man->func = &ttm_bo_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       case VMW_PL_GMR:
+       case VMW_PL_MOB:
+               /*
+                * "Guest Memory Regions" is an aperture-like feature with
+                * one slot per bo. There is an upper limit on the number of
+                * slots as well as on the bo size.
+                */
+               man->func = &vmw_gmrid_manager_func;
+               man->gpu_offset = 0;
+               man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
+               man->available_caching = TTM_PL_FLAG_CACHED;
+               man->default_caching = TTM_PL_FLAG_CACHED;
+               break;
+       default:
+               DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
+                    struct ttm_placement *placement)
+{
+       *placement = vmw_sys_placement;
+}
+
+static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+{
+       struct ttm_object_file *tfile =
+               vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
+
+       return vmw_user_bo_verify_access(bo, tfile);
+}
+
+static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+       struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+       struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+
+       mem->bus.addr = NULL;
+       mem->bus.is_iomem = false;
+       mem->bus.offset = 0;
+       mem->bus.size = mem->num_pages << PAGE_SHIFT;
+       mem->bus.base = 0;
+       if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+               return -EINVAL;
+       switch (mem->mem_type) {
+       case TTM_PL_SYSTEM:
+       case VMW_PL_GMR:
+       case VMW_PL_MOB:
+               return 0;
+       case TTM_PL_VRAM:
+               mem->bus.offset = mem->start << PAGE_SHIFT;
+               mem->bus.base = dev_priv->vram_start;
+               mem->bus.is_iomem = true;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+{
+}
+
+static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+       return 0;
+}
+
+/**
+ * vmw_move_notify - TTM move_notify callback
+ *
+ * @bo: The TTM buffer object about to move.
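+ * @evict: Whether the move is an eviction. Unused by this callback.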
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ *       region the move is taking place.
+ *
+ * Calls move_notify for all subsystems needing it.
+ * (currently only resources).
+ */
+static void vmw_move_notify(struct ttm_buffer_object *bo,
+                           bool evict,
+                           struct ttm_mem_reg *mem)
+{
+       vmw_resource_move_notify(bo, mem);
+       vmw_query_move_notify(bo, mem);
+}
+
+
+/**
+ * vmw_swap_notify - TTM swap_notify callback
+ *
+ * @bo: The TTM buffer object about to be swapped out.
+ */
+static void vmw_swap_notify(struct ttm_buffer_object *bo)
+{
+       vmw_resource_swap_notify(bo);
+       (void) ttm_bo_wait(bo, false, false);
+}
+
+
+struct ttm_bo_driver vmw_bo_driver = {
+       .ttm_tt_create = &vmw_ttm_tt_create,
+       .ttm_tt_populate = &vmw_ttm_populate,
+       .ttm_tt_unpopulate = &vmw_ttm_unpopulate,
+       .invalidate_caches = vmw_invalidate_caches,
+       .init_mem_type = vmw_init_mem_type,
+       .eviction_valuable = ttm_bo_eviction_valuable,
+       .evict_flags = vmw_evict_flags,
+       .move = NULL,
+       .verify_access = vmw_verify_access,
+       .move_notify = vmw_move_notify,
+       .swap_notify = vmw_swap_notify,
+       .fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
+       .io_mem_reserve = &vmw_ttm_io_mem_reserve,
+       .io_mem_free = &vmw_ttm_io_mem_free,
+};
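
A short usage sketch of the iterator convention documented at vmw_piter_start() above: like __sg_page_iter_start(), the iterator is invalid until advanced once, so the canonical loop advances before every dereference (compare the region-counting loop in vmw_ttm_map_dma()). The walker function is hypothetical; the vmw_piter_next()/vmw_piter_dma_addr()/vmw_piter_page() accessors are the ones used by this file:

/* Hedged sketch: walk all pages of a mapped buffer object, independent
 * of the current DMA mapping mode.
 */
static void example_walk_pages(const struct vmw_sg_table *vsgt)
{
	struct vmw_piter iter;

	vmw_piter_start(&iter, vsgt, 0);
	while (vmw_piter_next(&iter)) {
		dma_addr_t addr = vmw_piter_dma_addr(&iter);
		struct page *page = vmw_piter_page(&iter);

		(void)addr;	/* e.g. program a GMR/MOB page-table entry */
		(void)page;
	}
}
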
index 0bc784f..57115a5 100644 (file)
@@ -40,6 +40,7 @@ extern "C" {
 
 #define DRM_VMW_GET_PARAM            0
 #define DRM_VMW_ALLOC_DMABUF         1
+#define DRM_VMW_ALLOC_BO             1
 #define DRM_VMW_UNREF_DMABUF         2
 #define DRM_VMW_HANDLE_CLOSE         2
 #define DRM_VMW_CURSOR_BYPASS        3
@@ -356,9 +357,9 @@ struct drm_vmw_fence_rep {
 
 /*************************************************************************/
 /**
- * DRM_VMW_ALLOC_DMABUF
+ * DRM_VMW_ALLOC_BO
  *
- * Allocate a DMA buffer that is visible also to the host.
+ * Allocate a buffer object that is also visible to the host.
  * NOTE: The buffer is
  * identified by a handle and an offset, which are private to the guest, but
  * useable in the command stream. The guest kernel may translate these
@@ -366,27 +367,28 @@ struct drm_vmw_fence_rep {
  * be zero at all times, or it may disappear from the interface before it is
  * fixed.
  *
- * The DMA buffer may stay user-space mapped in the guest at all times,
+ * The buffer object may stay user-space mapped in the guest at all times,
  * and is thus suitable for sub-allocation.
  *
- * DMA buffers are mapped using the mmap() syscall on the drm device.
+ * Buffer objects are mapped using the mmap() syscall on the drm device.
  */
 
 /**
- * struct drm_vmw_alloc_dmabuf_req
+ * struct drm_vmw_alloc_bo_req
  *
  * @size: Required minimum size of the buffer.
  *
- * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Input data to the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-struct drm_vmw_alloc_dmabuf_req {
+struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
 };
+#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
 
 /**
- * struct drm_vmw_dmabuf_rep
+ * struct drm_vmw_bo_rep
  *
  * @map_handle: Offset to use in the mmap() call used to map the buffer.
  * @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +397,32 @@ struct drm_vmw_alloc_dmabuf_req {
  * @cur_gmr_offset: Offset to use in the command stream when this buffer is
  * referenced. See note above.
  *
- * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Output data from the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-struct drm_vmw_dmabuf_rep {
+struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
 };
+#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
 
 /**
- * union drm_vmw_dmabuf_arg
+ * union drm_vmw_alloc_bo_arg
  *
  * @req: Input data as described above.
  * @rep: Output data as described above.
  *
- * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Argument to the DRM_VMW_ALLOC_BO Ioctl.
  */
 
-union drm_vmw_alloc_dmabuf_arg {
-       struct drm_vmw_alloc_dmabuf_req req;
-       struct drm_vmw_dmabuf_rep rep;
-};
-
-/*************************************************************************/
-/**
- * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
- *
- */
-
-/**
- * struct drm_vmw_unref_dmabuf_arg
- *
- * @handle: Handle indicating what buffer to free. Obtained from the
- * DRM_VMW_ALLOC_DMABUF Ioctl.
- *
- * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
- */
-
-struct drm_vmw_unref_dmabuf_arg {
-       __u32 handle;
-       __u32 pad64;
+union drm_vmw_alloc_bo_arg {
+       struct drm_vmw_alloc_bo_req req;
+       struct drm_vmw_bo_rep rep;
 };
+#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
 
 /*************************************************************************/
 /**
@@ -1103,9 +1087,8 @@ union drm_vmw_extended_context_arg {
  * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
  * underlying resource.
  *
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
  */
 
 /**
@@ -1119,7 +1102,7 @@ struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
 };
-
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
 
 #if defined(__cplusplus)
 }
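
A hedged userspace sketch of the renamed DRM_VMW_ALLOC_BO path, using the generic libdrm command helpers. The structures and the mmap()-on-the-drm-fd convention come from the uapi hunk above; everything else (fd handling, the wrapper itself) is illustrative:

/* Hypothetical userspace helper: allocate a vmwgfx buffer object and map
 * it into the process. drmCommandWriteRead() dispatches by command index,
 * here DRM_VMW_ALLOC_BO as defined in the header above.
 */
#include <stddef.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static void *example_alloc_and_map_bo(int fd, __u32 size, __u32 *handle)
{
	union drm_vmw_alloc_bo_arg arg = { .req = { .size = size } };
	void *ptr;

	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg, sizeof(arg)))
		return NULL;

	*handle = arg.rep.handle;
	/* Buffer objects are mapped via mmap() on the drm fd, with
	 * rep.map_handle as the offset (see the ioctl documentation above).
	 */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
		   arg.rep.map_handle);
	return ptr == MAP_FAILED ? NULL : ptr;
}
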