CHROMIUM: drm/virtgpu: check for relevant capabilities

Initialize TTM VRAM support when host coherent memory is detected.

BUG=chromium:924405
TEST=compile

Bug: b/153580313

Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/1702157
Reviewed-by: Robert Tarasov <tutankhamen@chromium.org>
Commit-Queue: Gurchetan Singh <gurchetansingh@chromium.org>
Tested-by: Gurchetan Singh <gurchetansingh@chromium.org>
[rebase54(groeck): Squashed:
	FIXUP: CHROMIUM: drm/virtgpu: check for relevant capabilities
]
Change-Id: If2c6269d82cc4e9826660d8ffaefe19320fba2e1
Signed-off-by: Guenter Roeck <groeck@chromium.org>
Signed-off-by: Lingfeng Yang <lfy@google.com>
 drivers/gpu/drm/virtio/virtgpu_debugfs.c |  3
 drivers/gpu/drm/virtio/virtgpu_drv.c     |  3
 drivers/gpu/drm/virtio/virtgpu_drv.h     |  8
 drivers/gpu/drm/virtio/virtgpu_kms.c     | 24
 drivers/gpu/drm/virtio/virtgpu_object.c  | 34
 drivers/gpu/drm/virtio/virtgpu_ttm.c     | 81
 6 files changed, 153 lines changed
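
For orientation, the sketch below consolidates the flow the hunks add:
negotiate VIRTIO_GPU_F_HOST_COHERENT, claim the PCI BAR that backs the host
coherent memory, and register that BAR with TTM as a VRAM heap. It is
illustrative only: the helper name is made up, the RESOURCE_V2 gating around
the feature check is omitted, and in the patch the work is split between
virtio_gpu_init() and virtio_gpu_ttm_init().

/*
 * Illustrative sketch only; not part of the patch. The real code is split
 * across virtio_gpu_init() and virtio_gpu_ttm_init() in the hunks below,
 * and the HOST_COHERENT check there is nested under RESOURCE_V2.
 */
static int virtio_gpu_setup_host_coherent_sketch(struct drm_device *dev,
						 struct virtio_gpu_device *vgdev)
{
	int ret;

	if (!virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_HOST_COHERENT))
		return 0;	/* host does not expose coherent memory */

	/* The host coherent region is exposed through PCI BAR 4. */
	vgdev->cbar = 4;
	vgdev->caddr = pci_resource_start(dev->pdev, vgdev->cbar);
	vgdev->csize = pci_resource_len(dev->pdev, vgdev->cbar);

	ret = pci_request_region(dev->pdev, vgdev->cbar, "virtio-gpu-coherent");
	if (ret) {
		/* The patch only warns and carries on without the heap. */
		DRM_WARN("Cannot request coherent memory bar\n");
		return 0;
	}
	vgdev->has_host_coherent = true;

	/* Register the BAR with TTM as a VRAM heap, sized in pages. */
	return ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_VRAM,
			      vgdev->csize >> PAGE_SHIFT);
}

Keeping the heap registration behind has_host_coherent means the VRAM manager
is never set up on hosts that do not offer the feature.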

--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,9 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
virtio_add_bool(m, "resource v2", vgdev->has_resource_v2);
virtio_add_bool(m, "shared guest", vgdev->has_shared_guest);
virtio_add_bool(m, "host coherent", vgdev->has_host_coherent);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;

--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -178,6 +178,9 @@ static unsigned int features[] = {
VIRTIO_GPU_F_VIRGL,
#endif
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_V2,
VIRTIO_GPU_F_SHARED_GUEST,
VIRTIO_GPU_F_HOST_COHERENT,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,

--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -234,12 +234,20 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
bool has_resource_v2;
bool has_shared_guest;
bool has_host_coherent;
struct work_struct config_changed_work;
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
struct list_head cap_cache;
/* coherent memory */
int cbar;
unsigned long caddr;
unsigned long csize;
};
struct virtio_gpu_fpriv {

--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <drm/drmP.h>
@@ -181,6 +182,29 @@ int virtio_gpu_init(struct drm_device *dev)
DRM_INFO("EDID support available.\n");
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_V2)) {
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_HOST_COHERENT)) {
vgdev->cbar = 4;
vgdev->caddr = pci_resource_start(dev->pdev, vgdev->cbar);
vgdev->csize = pci_resource_len(dev->pdev, vgdev->cbar);
ret = pci_request_region(dev->pdev, vgdev->cbar, "virtio-gpu-coherent");
if (ret != 0) {
DRM_WARN("Cannot request coherent memory bar\n");
} else {
DRM_INFO("coherent host resources enabled, using %s bar %d,"
"at 0x%lx, size %ld MB", dev_name(&dev->pdev->dev),
vgdev->cbar, vgdev->caddr, vgdev->csize >> 20);
vgdev->has_host_coherent = true;
}
}
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_SHARED_GUEST))
vgdev->has_shared_guest = true;
vgdev->has_resource_v2 = true;
}
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");

--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -96,14 +96,42 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
u32 c = 1;
u32 ttm_caching_flags = 0;
vgbo->placement.placement = &vgbo->placement_code;
vgbo->placement.busy_placement = &vgbo->placement_code;
vgbo->placement_code.fpfn = 0;
vgbo->placement_code.lpfn = 0;
vgbo->placement_code.flags =
TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
TTM_PL_FLAG_NO_EVICT;
switch (vgbo->caching_type) {
case VIRTIO_GPU_CACHED:
ttm_caching_flags = TTM_PL_FLAG_CACHED;
break;
case VIRTIO_GPU_WRITE_COMBINE:
ttm_caching_flags = TTM_PL_FLAG_WC;
break;
case VIRTIO_GPU_UNCACHED:
ttm_caching_flags = TTM_PL_FLAG_UNCACHED;
break;
default:
ttm_caching_flags = TTM_PL_MASK_CACHING;
}
switch (vgbo->guest_memory_type) {
case VIRTIO_GPU_MEMORY_UNDEFINED:
case VIRTIO_GPU_MEMORY_TRANSFER:
case VIRTIO_GPU_MEMORY_SHARED_GUEST:
vgbo->placement_code.flags =
TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
TTM_PL_FLAG_NO_EVICT;
break;
case VIRTIO_GPU_MEMORY_HOST_COHERENT:
vgbo->placement_code.flags =
ttm_caching_flags | TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_NO_EVICT;
break;
}
vgbo->placement.num_placement = c;
vgbo->placement.num_busy_placement = c;

--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -185,6 +185,12 @@ static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
@@ -216,6 +222,7 @@ static int virtio_gpu_verify_access(struct ttm_buffer_object *bo,
static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem)
{
struct virtio_gpu_device *vgdev = virtio_gpu_get_vgdev(bdev);
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
mem->bus.addr = NULL;
@@ -229,8 +236,18 @@ static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
case TTM_PL_SYSTEM:
case TTM_PL_TT:
/* system memory */
mem->bus.offset = 0;
mem->bus.base = 0;
mem->bus.is_iomem = false;
return 0;
case TTM_PL_VRAM:
/* coherent memory (pci bar) */
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = vgdev->caddr;
mem->bus.is_iomem = true;
return 0;
default:
DRM_ERROR("Unsupported memory type %u\n", mem->mem_type);
return -EINVAL;
}
return 0;
@@ -249,8 +266,19 @@ struct virtio_gpu_ttm_tt {
struct virtio_gpu_object *obj;
};
static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
static int virtio_gpu_ttm_vram_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
return 0;
}
static int virtio_gpu_ttm_vram_unbind(struct ttm_tt *ttm)
{
return 0;
}
static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct virtio_gpu_ttm_tt *gtt =
container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
@@ -261,7 +289,7 @@ static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
return 0;
}
static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
struct virtio_gpu_ttm_tt *gtt =
container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
@@ -281,9 +309,15 @@ static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
kfree(gtt);
}
static struct ttm_backend_func virtio_gpu_tt_func = {
.bind = &virtio_gpu_ttm_tt_bind,
.unbind = &virtio_gpu_ttm_tt_unbind,
static struct ttm_backend_func virtio_gpu_backend_func = {
.bind = &virtio_gpu_ttm_backend_bind,
.unbind = &virtio_gpu_ttm_backend_unbind,
.destroy = &virtio_gpu_ttm_tt_destroy,
};
static struct ttm_backend_func virtio_gpu_vram_func = {
.bind = &virtio_gpu_ttm_vram_bind,
.unbind = &virtio_gpu_ttm_vram_unbind,
.destroy = &virtio_gpu_ttm_tt_destroy,
};
@@ -306,19 +340,33 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create2(struct ttm_buffer_object *bo,
{
unsigned long size = bo->num_pages << PAGE_SHIFT;
struct virtio_gpu_device *vgdev;
struct virtio_gpu_object *obj;
struct virtio_gpu_ttm_tt *gtt;
vgdev = virtio_gpu_get_vgdev(bo->bdev);
obj = container_of(bo, struct virtio_gpu_object, tbo);
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
gtt->ttm.ttm.func = &virtio_gpu_tt_func;
gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
if (ttm_dma_tt_init(&gtt->ttm, bo->bdev, size, page_flags,
dummy_read_page)) {
kfree(gtt);
return NULL;
gtt->obj = obj;
if (obj->guest_memory_type == VIRTIO_GPU_MEMORY_HOST_COHERENT) {
gtt->ttm.ttm.func = &virtio_gpu_vram_func;
if (ttm_tt_init(&gtt->ttm.ttm, bo->bdev, size, page_flags,
dummy_read_page)) {
kfree(gtt);
return NULL;
}
} else {
gtt->ttm.ttm.func = &virtio_gpu_backend_func;
if (ttm_dma_tt_init(&gtt->ttm, bo->bdev, size, page_flags,
dummy_read_page)) {
kfree(gtt);
return NULL;
}
}
return &gtt->ttm.ttm;
}
@@ -370,6 +418,15 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
DRM_ERROR("Failed initializing GTT heap.\n");
goto err_mm_init;
}
if (vgdev->has_host_coherent) {
r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_VRAM,
vgdev->csize >> PAGE_SHIFT);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
goto err_mm_init;
}
}
return 0;
err_mm_init:
