ret = ttm_range_man_init(&dev_priv->bdev, man,
dev_priv->vram_size >> PAGE_SHIFT);
#endif
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM)->use_type = false;
return ret;
}
vmw_thp_fini(dev_priv);
#else
ttm_bo_man_fini(&dev_priv->bdev,
- &dev_priv->bdev.man[TTM_PL_VRAM]);
+ ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM));
#endif
}
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_no_bdev;
}
- dev_priv->bdev.man[TTM_PL_SYSTEM].available_caching =
+ ttm_manager_type(&dev_priv->bdev, TTM_PL_SYSTEM)->available_caching =
TTM_PL_FLAG_CACHED;
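
For reference, the accessor these hunks switch to can be read as a thin wrapper
over the old array lookup. A minimal sketch, assuming the helper does nothing
more than hide the bdev->man[] indexing that the removed lines did by hand (the
authoritative definition lives in the TTM headers, and its internals are free
to change later without touching the driver):

/* Sketch only: inferred from the call sites and the removed lines above. */
static inline struct ttm_mem_type_manager *
ttm_manager_type(struct ttm_bo_device *bdev, int mem_type)
{
	return &bdev->man[mem_type];
}

With that wrapper in place, every open-coded &dev_priv->bdev.man[...] access
becomes a ttm_manager_type(&dev_priv->bdev, ...) call, so the driver no longer
depends on the memory-manager array being embedded in struct ttm_bo_device.
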
/*
*/
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
spin_lock(&dev_priv->svga_lock);
- if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ if (!man->use_type) {
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+ man->use_type = true;
}
spin_unlock(&dev_priv->svga_lock);
}
*/
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
spin_lock(&dev_priv->svga_lock);
- if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ if (man->use_type) {
+ man->use_type = false;
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
*/
void vmw_svga_disable(struct vmw_private *dev_priv)
{
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
+
/*
* Disabling SVGA will turn off device modesetting capabilities, so
* notify KMS about that so that it doesn't cache atomic state that
vmw_kms_lost_device(dev_priv->dev);
ttm_write_lock(&dev_priv->reservation_sem, false);
spin_lock(&dev_priv->svga_lock);
- if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
- dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ if (man->use_type) {
+ man->use_type = false;
spin_unlock(&dev_priv->svga_lock);
if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
DRM_ERROR("Failed evicting VRAM buffers.\n");
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
- struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type);
struct vmwgfx_gmrid_man *gman =
kzalloc(sizeof(*gman), GFP_KERNEL);
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
- struct ttm_mem_type_manager *man = &dev_priv->bdev.man[type];
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, type);
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
int vmw_thp_init(struct vmw_private *dev_priv)
{
- struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
struct vmw_thp_manager *rman;
man->available_caching = TTM_PL_FLAG_CACHED;
man->default_caching = TTM_PL_FLAG_CACHED;
void vmw_thp_fini(struct vmw_private *dev_priv)
{
- struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
+ struct ttm_mem_type_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
struct drm_mm *mm = &rman->mm;
int ret;