Remove unneeded patches

Hector Martin 2017-01-05 11:50:09 +09:00
parent 974a9e85a5
commit 391335346f
4 changed files with 0 additions and 382 deletions

@@ -1,48 +0,0 @@
From abd3f05b15245951daf6e6aa4228e176e433ae5c Mon Sep 17 00:00:00 2001
From: Flora Cui <Flora.Cui@amd.com>
Date: Tue, 9 Aug 2016 15:47:51 +0800
Subject: [PATCH 106/117] amdgpu: move hybrid specific ioctl to the end

To avoid conflicts
Change-Id: I41a3b62363b2d653e6e8726073c2e9c816604030
Signed-off-by: Flora Cui <Flora.Cui@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
---
include/drm/amdgpu_drm.h | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 7ffd26b..6ccad71 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -46,9 +46,10 @@
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
-#define DRM_AMDGPU_WAIT_FENCES 0x12
-#define DRM_AMDGPU_GEM_FIND_BO 0x13
-#define DRM_AMDGPU_FREESYNC 0x14
+#define DRM_AMDGPU_FREESYNC 0x14
+/* hybrid specific ioctls */
+#define DRM_AMDGPU_WAIT_FENCES 0x5e
+#define DRM_AMDGPU_GEM_FIND_BO 0x5f
#define DRM_IOCTL_AMDGPU_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
@@ -62,9 +63,10 @@
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
-#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
-#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
#define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
+/* hybrid specific ioctls */
+#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
+#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
--
2.7.4
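
For context on the renumbering above: a DRM driver's ioctl index is added to DRM_COMMAND_BASE to form the final command number, and upstream amdgpu allocates indices upward from the bottom of that range, so parking the hybrid-only ioctls just below the end of the per-driver range keeps them from colliding with future upstream additions. A minimal standalone sketch of the arithmetic (the DRM_COMMAND_* values are mirrored from drm.h; the program itself is illustrative glue, not part of the patch):

#include <stdio.h>

/* Mirrored from drm.h: the per-driver ioctl command range. */
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END  0xa0

/* Hybrid-only indices parked at the top of the range, as in the patch. */
#define DRM_AMDGPU_WAIT_FENCES 0x5e
#define DRM_AMDGPU_GEM_FIND_BO 0x5f

int main(void)
{
	/* Final command numbers as the kernel dispatch table sees them. */
	printf("WAIT_FENCES -> 0x%x\n",
	       DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES);
	printf("GEM_FIND_BO -> 0x%x (last valid is 0x%x)\n",
	       DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO,
	       DRM_COMMAND_END - 1);
	return 0;
}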

@@ -1,40 +0,0 @@
From fdcfc33fadaf63e01061fc41e113c0ed777cc137 Mon Sep 17 00:00:00 2001
From: Hawking Zhang <Hawking.Zhang@amd.com>
Date: Fri, 12 Aug 2016 14:49:53 +0800
Subject: [PATCH 110/117] drm/amdgpu: move freesync ioctl to hybrid specific range
Change-Id: If324e05ac71107d00c24567a0d2f3380b2084a4f
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Flora Cui <Flora.Cui@amd.com>
---
include/drm/amdgpu_drm.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 6ccad71..cda8f36 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -46,8 +46,8 @@
#define DRM_AMDGPU_WAIT_CS 0x09
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
-#define DRM_AMDGPU_FREESYNC 0x14
/* hybrid specific ioctls */
+#define DRM_AMDGPU_FREESYNC 0x5d
#define DRM_AMDGPU_WAIT_FENCES 0x5e
#define DRM_AMDGPU_GEM_FIND_BO 0x5f
@@ -63,8 +63,8 @@
#define DRM_IOCTL_AMDGPU_WAIT_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
-#define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
/* hybrid specific ioctls */
+#define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
--
2.7.4
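
Taken together with the previous patch (and with patch 115 below, which adds DRM_AMDGPU_GEM_DGMA at 0x5c), the hybrid-only block grows downward from the top of the per-driver range:

/* Hybrid-specific ioctl indices after this series (summary, not a diff): */
#define DRM_AMDGPU_GEM_DGMA    0x5c	/* added by patch 115 below */
#define DRM_AMDGPU_FREESYNC    0x5d	/* moved here by this patch */
#define DRM_AMDGPU_WAIT_FENCES 0x5e	/* moved by patch 106 above */
#define DRM_AMDGPU_GEM_FIND_BO 0x5f	/* moved by patch 106 above */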

@@ -1,187 +0,0 @@
From 1474cc7321f29b223249f9f7c09797534aa67288 Mon Sep 17 00:00:00 2001
From: Flora Cui <Flora.Cui@amd.com>
Date: Thu, 11 Aug 2016 15:25:14 +0800
Subject: [PATCH 115/117] amdgpu: implement direct gma
Change-Id: I37a6a0f79a91b8e793fc90eb3955045bebf24848
Signed-off-by: Flora Cui <Flora.Cui@amd.com>
---
amdgpu/amdgpu.h | 43 +++++++++++++++++++++++++++++++++++++
amdgpu/amdgpu_bo.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++-
include/drm/amdgpu_drm.h | 12 +++++++++++
3 files changed, 109 insertions(+), 1 deletion(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 763a3a6..525bf8e 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -727,6 +727,49 @@ int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
				  amdgpu_bo_handle *buf_handle,
				  uint64_t *offset_in_bo);
+/**
+ * Request GPU access to physical memory from a 3rd party device.
+ *
+ * \param dev - [in] Device handle. See #amdgpu_device_initialize()
+ * \param phys_address - [in] Physical address from the 3rd party device
+ * which we want to map into the GPU address space (make it GPU
+ * accessible). This address must be correctly aligned.
+ * \param size - [in] Size of the allocation (must be correctly aligned)
+ * \param buf_handle - [out] Buffer handle for the imported memory
+ * resource, to be referenced on submission and used in other operations.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ *
+ * \note
+ * This call should guarantee that such memory will be persistently
+ * "locked" / made non-pageable. The purpose of this call is to give
+ * the GPU the opportunity to access this resource during submission.
+ *
+ * The supported (theoretical) maximum size of a mapping is restricted
+ * only by capability.direct_gma_size. See #amdgpu_query_capability()
+ *
+ * It is the responsibility of the caller to specify a correct
+ * physical_address.
+ */
+int amdgpu_create_bo_from_phys_mem(amdgpu_device_handle dev,
+				   uint64_t phys_address, uint64_t size,
+				   amdgpu_bo_handle *buf_handle);
+
+/**
+ * Get the physical address of a BO.
+ *
+ * \param buf_handle - [in] Buffer handle whose physical address to query.
+ * \param phys_address - [out] Physical address of this BO.
+ *
+ * \return 0 on success\n
+ * <0 - Negative POSIX Error code
+ */
+int amdgpu_bo_get_phys_address(amdgpu_bo_handle buf_handle,
+			       uint64_t *phys_address);
/**
 * Free previously allocated memory
diff --git a/amdgpu/amdgpu_bo.c b/amdgpu/amdgpu_bo.c
index a07d0b5..6abc5e3 100644
--- a/amdgpu/amdgpu_bo.c
+++ b/amdgpu/amdgpu_bo.c
@@ -87,7 +87,8 @@ int amdgpu_bo_alloc(amdgpu_device_handle dev,
	int r = 0;

	/* It's an error if the heap is not specified */
-	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM)))
+	if (!(heap & (AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM
+		      | AMDGPU_GEM_DOMAIN_DGMA)))
		return -EINVAL;

	bo = calloc(1, sizeof(struct amdgpu_bo));
@@ -570,6 +571,58 @@ int amdgpu_get_bo_from_fb_id(amdgpu_device_handle dev, unsigned int fb_id, struc
	return r;
}
+int amdgpu_create_bo_from_phys_mem(amdgpu_device_handle dev,
+				   uint64_t phys_address, uint64_t size,
+				   amdgpu_bo_handle *buf_handle)
+{
+	struct drm_amdgpu_gem_dgma args;
+	amdgpu_bo_handle bo;
+	int r;
+
+	if (phys_address == 0 || phys_address & 4095 ||
+	    size == 0 || size & 4095)
+		return -EINVAL;
+
+	args.addr = phys_address;
+	args.size = size;
+	args.op = AMDGPU_GEM_DGMA_IMPORT;
+	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_DGMA,
+				&args, sizeof(args));
+	if (r)
+		return r;
+
+	bo = calloc(1, sizeof(struct amdgpu_bo));
+	if (!bo)
+		return -ENOMEM;
+
+	atomic_set(&bo->refcount, 1);
+	pthread_mutex_init(&bo->cpu_access_mutex, NULL);
+	bo->dev = dev;
+	bo->alloc_size = size;
+	bo->handle = args.handle;
+
+	*buf_handle = bo;
+
+	return 0;
+}
+
+int amdgpu_bo_get_phys_address(amdgpu_bo_handle buf_handle,
+			       uint64_t *phys_address)
+{
+	struct drm_amdgpu_gem_dgma args;
+	int r;
+
+	args.op = AMDGPU_GEM_DGMA_QUERY_PHYS_ADDR;
+	args.handle = buf_handle->handle;
+	r = drmCommandWriteRead(buf_handle->dev->fd, DRM_AMDGPU_GEM_DGMA,
+				&args, sizeof(args));
+	if (r)
+		return r;
+
+	*phys_address = args.addr;
+	return 0;
+}
+
int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	/* Just drop the reference. */
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 14d800e..413a9dc 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -47,6 +47,7 @@
#define DRM_AMDGPU_GEM_OP 0x10
#define DRM_AMDGPU_GEM_USERPTR 0x11
/* hybrid specific ioctls */
+#define DRM_AMDGPU_GEM_DGMA 0x5c
#define DRM_AMDGPU_FREESYNC 0x5d
#define DRM_AMDGPU_WAIT_FENCES 0x5e
#define DRM_AMDGPU_GEM_FIND_BO 0x5f
@@ -64,6 +65,7 @@
#define DRM_IOCTL_AMDGPU_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
/* hybrid specific ioctls */
+#define DRM_IOCTL_AMDGPU_GEM_DGMA DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_DGMA, struct drm_amdgpu_gem_dgma)
#define DRM_IOCTL_AMDGPU_FREESYNC DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FREESYNC, struct drm_amdgpu_freesync)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_GEM_FIND_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_FIND_BO, struct drm_amdgpu_gem_find_bo)
@@ -74,6 +76,7 @@
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_DGMA 0x40

/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -209,6 +212,15 @@ struct drm_amdgpu_gem_userptr {
	uint32_t handle;
};

+#define AMDGPU_GEM_DGMA_IMPORT 0
+#define AMDGPU_GEM_DGMA_QUERY_PHYS_ADDR 1
+struct drm_amdgpu_gem_dgma {
+	uint64_t addr;
+	uint64_t size;
+	uint32_t op;
+	uint32_t handle;
+};
+
struct drm_amdgpu_gem_find_bo {
	uint64_t addr;
	uint64_t size;
--
2.7.4
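
The intended call sequence mirrors the test added in the next patch: allocate from the DGMA heap, query the physical address, then import that range back as a second BO. A minimal sketch, assuming a hybrid-stack libdrm that exports these entry points and an already-initialized device handle (error paths collapsed; addresses and sizes must be 4 KiB aligned, as enforced above):

#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static int direct_gma_roundtrip(amdgpu_device_handle dev)
{
	struct amdgpu_bo_alloc_request req = {0};
	amdgpu_bo_handle bo = NULL, import = NULL;
	uint64_t phys = 0;
	int r;

	/* Carve one page out of the direct-GMA aperture. */
	req.alloc_size = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_DGMA;
	r = amdgpu_bo_alloc(dev, &req, &bo);
	if (r)
		return r;

	/* Ask where the BO sits physically... */
	r = amdgpu_bo_get_phys_address(bo, &phys);
	/* ...and map that physical range back in as a second BO. */
	if (!r)
		r = amdgpu_create_bo_from_phys_mem(dev, phys, 4096, &import);

	if (import)
		amdgpu_bo_free(import);
	amdgpu_bo_free(bo);
	return r;
}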

@@ -1,107 +0,0 @@
From 331577c5a8736f15fdf55a7606414efcf78a5dff Mon Sep 17 00:00:00 2001
From: Flora Cui <Flora.Cui@amd.com>
Date: Thu, 11 Aug 2016 15:26:16 +0800
Subject: [PATCH 116/117] tests/amdgpu: add direct gma test
Change-Id: Ib00252eff16a84f16f01039ff39f957bff903bae
Signed-off-by: Flora Cui <Flora.Cui@amd.com>
---
tests/amdgpu/bo_tests.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/tests/amdgpu/bo_tests.c b/tests/amdgpu/bo_tests.c
index 195667f..5d1f67b 100644
--- a/tests/amdgpu/bo_tests.c
+++ b/tests/amdgpu/bo_tests.c
@@ -26,6 +26,7 @@
#endif

#include <stdio.h>
+#include <inttypes.h>

#include "CUnit/Basic.h"
@@ -47,7 +48,7 @@ static void amdgpu_bo_export_import(void);
static void amdgpu_bo_metadata(void);
static void amdgpu_bo_map_unmap(void);
static void amdgpu_get_fb_id_and_handle(void);
-
+static void amdgpu_bo_direct_gma(void);

CU_TestInfo bo_tests[] = {
	{ "Export/Import", amdgpu_bo_export_import },
@@ -56,6 +57,7 @@ CU_TestInfo bo_tests[] = {
#endif
	{ "CPU map/unmap", amdgpu_bo_map_unmap },
	{ "GET FB_ID AND FB_HANDLE", amdgpu_get_fb_id_and_handle },
+	{ "Direct GMA", amdgpu_bo_direct_gma },
	CU_TEST_INFO_NULL,
};
@@ -202,3 +204,63 @@ static void amdgpu_get_fb_id_and_handle(void)
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_NOT_EQUAL(output.buf_handle, 0);
}
+
+#define TEST_LOOP 20
+static void amdgpu_bo_direct_gma(void)
+{
+	amdgpu_bo_handle buf_handle[TEST_LOOP] = {0};
+	amdgpu_bo_handle buf_handle_import[TEST_LOOP] = {0};
+	uint32_t *ptr[TEST_LOOP] = {0};
+	struct amdgpu_bo_alloc_request req = {0};
+	struct drm_amdgpu_capability cap;
+	uint64_t size = 4096, phys_addr, remain;
+	int i, j, r;
+
+	amdgpu_query_capability(device_handle, &cap);
+	if (!(cap.flag & AMDGPU_CAPABILITY_DIRECT_GMA_FLAG))
+		return;
+
+	amdgpu_vprintf("direct_gma_size is %d MB\n", cap.direct_gma_size);
+	remain = cap.direct_gma_size << 20;
+
+	req.preferred_heap = AMDGPU_GEM_DOMAIN_DGMA;
+	for (i = 0; i < TEST_LOOP; i++) {
+		req.alloc_size = size;
+		r = amdgpu_bo_alloc(device_handle, &req, &buf_handle[i]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_get_phys_address(buf_handle[i], &phys_addr);
+		CU_ASSERT_EQUAL(r, 0);
+		amdgpu_vprintf("bo_size %"PRIx64" phys_addr %"PRIx64"\n", size, phys_addr);
+
+		r = amdgpu_create_bo_from_phys_mem(device_handle, phys_addr, size, &buf_handle_import[i]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		r = amdgpu_bo_cpu_map(buf_handle_import[i], (void **)&ptr[i]);
+		CU_ASSERT_EQUAL(r, 0);
+
+		for (j = 0; j < (size / 4); ++j)
+			ptr[i][j] = 0xdeadbeef;
+
+		remain -= size;
+		size <<= 1;
+		amdgpu_vprintf("test loop %d finished, remain %"PRIx64", try to alloc %"PRIx64"\n", i, remain, size);
+		if (remain < size)
+			break;
+	}
+
+	for (i = 0; i < TEST_LOOP; i++) {
+		if (ptr[i]) {
+			r = amdgpu_bo_cpu_unmap(buf_handle_import[i]);
+			CU_ASSERT_EQUAL(r, 0);
+		}
+
+		if (buf_handle_import[i]) {
+			r = amdgpu_bo_free(buf_handle_import[i]);
+			CU_ASSERT_EQUAL(r, 0);
+		}
+
+		if (buf_handle[i]) {
+			r = amdgpu_bo_free(buf_handle[i]);
+			CU_ASSERT_EQUAL(r, 0);
+		}
+	}
+}
--
2.7.4