msm: vidc_3x: ION Upgrade changes for video

With kernel 4.14, most of the ION APIs are deprecated.
These changes upgrade the video driver to use the dma-buf APIs instead.

Change-Id: I95a7c0ab0abd97c6860113e911f93dad3166593d
Signed-off-by: Vasantha Balla <vballa@codeaurora.org>
parent 4c5fed65f1
commit 877971827a
  1. drivers/media/platform/msm/vidc_3x/msm_smem.c (801 lines changed)
  2. drivers/media/platform/msm/vidc_3x/msm_vidc.c (120 lines changed)
  3. drivers/media/platform/msm/vidc_3x/msm_vidc_common.c (197 lines changed)
  4. drivers/media/platform/msm/vidc_3x/msm_vidc_common.h (7 lines changed)
  5. drivers/media/platform/msm/vidc_3x/msm_vidc_internal.h (36 lines changed)
  6. drivers/media/platform/msm/vidc_3x/msm_vidc_res_parse.c (12 lines changed)
  7. drivers/media/platform/msm/vidc_3x/venus_hfi.c (77 lines changed)
  8. drivers/media/platform/msm/vidc_3x/venus_hfi.h (3 lines changed)
  9. include/media/msm_vidc.h (7 lines changed)
  10. include/trace/events/msm_vidc.h (68 lines changed)

Diff for drivers/media/platform/msm/vidc_3x/msm_smem.c suppressed because it is too large.
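
The bulk of the ION-to-dma-buf conversion lives in msm_smem.c, whose diff is suppressed above. For orientation only, here is a minimal sketch of the generic dma-buf import sequence that a msm_smem_map_dma_buf() implementation of this kind would be expected to follow; the function name example_map_dma_buf and the use of the generic attachment mapping (rather than the driver's own SMMU context banks) are assumptions, not the actual driver code.

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>

/* Sketch only: import an fd and resolve a device address into struct msm_smem. */
static int example_map_dma_buf(struct device *dev, struct msm_smem *smem)
{
	struct dma_buf *dbuf;
	struct dma_buf_attachment *attach;
	struct sg_table *table;

	dbuf = dma_buf_get(smem->fd);            /* take a reference on the fd's dma-buf */
	if (IS_ERR_OR_NULL(dbuf))
		return -EINVAL;

	attach = dma_buf_attach(dbuf, dev);      /* attach the importing (video) device */
	if (IS_ERR_OR_NULL(attach)) {
		dma_buf_put(dbuf);
		return -ENOMEM;
	}

	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(table)) {
		dma_buf_detach(dbuf, attach);
		dma_buf_put(dbuf);
		return -ENOMEM;
	}

	smem->dma_buf = dbuf;
	smem->size = dbuf->size;
	smem->device_addr = sg_dma_address(table->sgl) + smem->offset;
	/* the real driver stashes table/attach/buf in smem's dma_mapping_info
	 * so that msm_smem_unmap_dma_buf() can undo the mapping later */
	return 0;
}

msm_smem_unmap_dma_buf() would then reverse these steps (unmap the attachment, detach, and drop the dma-buf reference).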

@ -221,9 +221,8 @@ struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
*plane = 0;
list_for_each_entry(temp, &inst->registeredbufs.list, list) {
for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
bool ion_hndl_matches = temp->handle[i] ?
msm_smem_compare_buffers(inst->mem_client, fd,
temp->handle[i]->smem_priv) : false;
bool dma_matches = msm_smem_compare_buffers(fd,
temp->smem[i].dma_buf);
bool device_addr_matches = device_addr ==
temp->device_addr[i];
bool contains_within = CONTAINS(temp->buff_off[i],
@ -233,7 +232,7 @@ struct buffer_info *get_registered_buf(struct msm_vidc_inst *inst,
temp->buff_off[i], temp->size[i]);
if (!temp->inactive &&
(ion_hndl_matches || device_addr_matches) &&
(dma_matches || device_addr_matches) &&
(contains_within || overlaps)) {
dprintk(VIDC_DBG,
"This memory region is already mapped\n");
@ -254,7 +253,6 @@ static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
{
struct buffer_info *temp;
struct msm_smem *same_fd_handle = NULL;
int i;
if (!fd)
@ -268,14 +266,13 @@ static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
mutex_lock(&inst->registeredbufs.lock);
list_for_each_entry(temp, &inst->registeredbufs.list, list) {
for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
bool ion_hndl_matches = temp->handle[i] ?
msm_smem_compare_buffers(inst->mem_client, fd,
temp->handle[i]->smem_priv) : false;
if (ion_hndl_matches && temp->mapped[i]) {
bool dma_matches = msm_smem_compare_buffers(fd,
temp->smem[i].dma_buf);
if (dma_matches && temp->mapped[i]) {
temp->same_fd_ref[i]++;
dprintk(VIDC_INFO,
"Found same fd buffer\n");
same_fd_handle = temp->handle[i];
same_fd_handle = &temp->smem[i];
break;
}
}
@ -367,23 +364,6 @@ static inline void repopulate_v4l2_buffer(struct v4l2_buffer *b,
}
}
static struct msm_smem *map_buffer(struct msm_vidc_inst *inst,
struct v4l2_plane *p, enum hal_buffer buffer_type)
{
struct msm_smem *handle = NULL;
handle = msm_comm_smem_user_to_kernel(inst,
p->reserved[0],
p->reserved[1],
buffer_type);
if (!handle) {
dprintk(VIDC_ERR,
"%s: Failed to get device buffer address\n", __func__);
return NULL;
}
return handle;
}
static inline enum hal_buffer get_hal_buffer_type(
struct msm_vidc_inst *inst, struct v4l2_buffer *b)
{
@ -518,17 +498,27 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
same_fd_handle->device_addr + binfo->buff_off[i];
b->m.planes[i].m.userptr = binfo->device_addr[i];
binfo->mapped[i] = false;
binfo->handle[i] = same_fd_handle;
binfo->smem[i] = *same_fd_handle;
} else {
binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
get_hal_buffer_type(inst, b));
if (!binfo->handle[i]) {
rc = -EINVAL;
binfo->smem[i].buffer_type = binfo->type;
binfo->smem[i].fd = binfo->fd[i];
binfo->smem[i].offset = binfo->buff_off[i];
binfo->smem[i].size = binfo->size[i];
rc = msm_smem_map_dma_buf(inst, &binfo->smem[i]);
if (rc) {
dprintk(VIDC_ERR, "%s: map failed.\n",
__func__);
goto exit;
}
/* increase refcount as we get both fbd and rbr */
rc = msm_smem_map_dma_buf(inst, &binfo->smem[i]);
if (rc) {
dprintk(VIDC_ERR, "%s: map failed..\n",
__func__);
goto exit;
}
binfo->mapped[i] = true;
binfo->device_addr[i] = binfo->handle[i]->device_addr +
binfo->device_addr[i] = binfo->smem[i].device_addr +
binfo->buff_off[i];
b->m.planes[i].m.userptr = binfo->device_addr[i];
}
@ -541,7 +531,7 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
}
dprintk(VIDC_DBG,
"%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
__func__, binfo, i, binfo->handle[i],
__func__, binfo, i, binfo->smem[i],
&binfo->device_addr[i], binfo->fd[i],
binfo->buff_off[i], binfo->mapped[i]);
}
@ -594,7 +584,7 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
for (i = 0; i < temp->num_planes; i++) {
dprintk(VIDC_DBG,
"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
__func__, temp, i, temp->handle[i],
__func__, temp, i, temp->smem[i],
&temp->device_addr[i], temp->fd[i],
temp->buff_off[i], temp->mapped[i]);
/*
@ -606,17 +596,19 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
* For buffers which share the same fd, do not unmap and keep
* the buffer info in registered list.
*/
if (temp->handle[i] && temp->mapped[i] &&
!temp->same_fd_ref[i]) {
if (temp->mapped[i] && !temp->same_fd_ref[i]) {
if (msm_smem_unmap_dma_buf(inst, &temp->smem[i]))
dprintk(VIDC_DBG, "unmap failed..\n");
msm_comm_smem_free(inst,
temp->handle[i]);
&temp->smem[i]);
}
if (temp->same_fd_ref[i])
keep_node = true;
else {
temp->fd[i] = 0;
temp->handle[i] = 0;
//temp->smem[i] = 0;
temp->device_addr[i] = 0;
temp->uvaddr[i] = 0;
}
@ -679,18 +671,14 @@ int output_buffer_cache_invalidate(struct msm_vidc_inst *inst,
for (i = 0; i < binfo->num_planes; i++) {
if (binfo->handle[i]) {
rc = msm_comm_smem_cache_operations(inst,
binfo->handle[i], SMEM_CACHE_INVALIDATE);
if (rc) {
dprintk(VIDC_ERR,
"%s: Failed to clean caches: %d\n",
__func__, rc);
return -EINVAL;
}
} else
dprintk(VIDC_DBG, "%s: NULL handle for plane %d\n",
__func__, i);
rc = msm_comm_smem_cache_operations(inst,
&binfo->smem[i], SMEM_CACHE_INVALIDATE);
if (rc) {
dprintk(VIDC_ERR,
"%s: Failed to clean caches: %d\n",
__func__, rc);
return -EINVAL;
}
}
return 0;
}
@ -812,14 +800,14 @@ free_and_unmap:
if (bi->type == buffer_type) {
list_del(&bi->list);
for (i = 0; i < bi->num_planes; i++) {
if (bi->handle[i] && bi->mapped[i]) {
if (bi->mapped[i]) {
dprintk(VIDC_DBG,
"%s: [UNMAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
__func__, bi, i, bi->handle[i],
__func__, bi, i, bi->smem[i],
&bi->device_addr[i], bi->fd[i],
bi->buff_off[i], bi->mapped[i]);
msm_comm_smem_free(inst,
bi->handle[i]);
&bi->smem[i]);
}
}
kfree(bi);
@ -881,10 +869,10 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
&binfo->device_addr[i]);
if (inst->fmts[OUTPUT_PORT].fourcc ==
V4L2_PIX_FMT_HEVC_HYBRID && binfo->handle[i] &&
V4L2_PIX_FMT_HEVC_HYBRID &&
b->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
rc = msm_comm_smem_cache_operations(inst,
binfo->handle[i], SMEM_CACHE_INVALIDATE);
&binfo->smem[i], SMEM_CACHE_INVALIDATE);
if (rc) {
dprintk(VIDC_ERR,
"Failed to inv caches: %d\n", rc);
@ -892,10 +880,9 @@ int msm_vidc_qbuf(void *instance, struct v4l2_buffer *b)
}
}
if (binfo->handle[i] &&
(b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)) {
if (b->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
rc = msm_comm_smem_cache_operations(inst,
binfo->handle[i], SMEM_CACHE_CLEAN);
&binfo->smem[i], SMEM_CACHE_CLEAN);
if (rc) {
dprintk(VIDC_ERR,
"Failed to clean caches: %d\n", rc);
@ -1223,12 +1210,6 @@ void *msm_vidc_open(int core_id, int session_type)
i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
init_completion(&inst->completions[i]);
}
inst->mem_client = msm_smem_new_client(SMEM_DMA,
&inst->core->resources, session_type);
if (!inst->mem_client) {
dprintk(VIDC_ERR, "Failed to create memory client\n");
goto fail_mem_client;
}
if (session_type == MSM_VIDC_DECODER) {
msm_vdec_inst_init(inst);
@ -1292,8 +1273,6 @@ fail_bufq_output:
vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
fail_bufq_capture:
msm_comm_ctrl_deinit(inst);
msm_smem_delete_client(inst->mem_client);
fail_mem_client:
DEINIT_MSM_VIDC_LIST(&inst->eosbufs);
kfree(inst);
inst = NULL;
@ -1407,8 +1386,8 @@ int msm_vidc_close(void *instance)
for (i = 0; i < min(bi->num_planes, VIDEO_MAX_PLANES);
i++) {
if (bi->handle[i] && bi->mapped[i])
msm_comm_smem_free(inst, bi->handle[i]);
if (bi->mapped[i])
msm_comm_smem_free(inst, &bi->smem[i]);
}
kfree(bi);
@ -1427,7 +1406,6 @@ int msm_vidc_close(void *instance)
"Failed to move video instance to uninit state\n");
msm_comm_session_clean(inst);
msm_smem_delete_client(inst->mem_client);
kref_put(&inst->kref, close_helper);
return 0;
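
In the hunks above, the ION-handle comparison (msm_smem_compare_buffers(inst->mem_client, fd, priv)) becomes a plain fd-versus-dma_buf check. A plausible shape for the new msm_smem_compare_buffers(fd, dma_buf) helper is sketched below; this is a hedged guess, since its body is in the suppressed msm_smem.c diff.

/* Sketch: resolve the fd and compare it against the already-imported dma_buf. */
bool example_compare_buffers(int fd, void *dma_buf)
{
	struct dma_buf *dbuf;
	bool same;

	dbuf = dma_buf_get(fd);          /* temporary reference */
	if (IS_ERR_OR_NULL(dbuf))
		return false;

	same = (dbuf == (struct dma_buf *)dma_buf);
	dma_buf_put(dbuf);               /* drop the temporary reference */
	return same;
}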

@ -811,9 +811,9 @@ static void handle_session_release_buf_done(enum hal_command_response cmd,
mutex_lock(&inst->scratchbufs.lock);
list_for_each_safe(ptr, next, &inst->scratchbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
if (address == (u32)buf->handle->device_addr) {
dprintk(VIDC_DBG, "releasing scratch: %pa\n",
&buf->handle->device_addr);
if (address == buf->smem.device_addr) {
dprintk(VIDC_DBG, "releasing scratch: %x\n",
buf->smem.device_addr);
buf_found = true;
}
}
@ -822,9 +822,9 @@ static void handle_session_release_buf_done(enum hal_command_response cmd,
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
if (address == (u32)buf->handle->device_addr) {
dprintk(VIDC_DBG, "releasing persist: %pa\n",
&buf->handle->device_addr);
if (address == buf->smem.device_addr) {
dprintk(VIDC_DBG, "releasing persist: %x\n",
buf->smem.device_addr);
buf_found = true;
}
}
@ -1442,8 +1442,8 @@ void validate_output_buffers(struct msm_vidc_inst *inst)
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
if (binfo->buffer_ownership != DRIVER) {
dprintk(VIDC_DBG,
"This buffer is with FW %pa\n",
&binfo->handle->device_addr);
"This buffer is with FW %x\n",
binfo->smem.device_addr);
continue;
}
buffers_owned_by_driver++;
@ -1462,7 +1462,6 @@ int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst)
{
struct internal_buf *binfo;
struct hfi_device *hdev;
struct msm_smem *handle;
struct vidc_frame_data frame_data = {0};
struct hal_buffer_requirements *output_buf, *extra_buf;
int rc = 0;
@ -1492,13 +1491,12 @@ int msm_comm_queue_output_buffers(struct msm_vidc_inst *inst)
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
if (binfo->buffer_ownership != DRIVER)
continue;
handle = binfo->handle;
frame_data.alloc_len = output_buf->buffer_size;
frame_data.filled_len = 0;
frame_data.offset = 0;
frame_data.device_addr = handle->device_addr;
frame_data.device_addr = binfo->smem.device_addr;
frame_data.flags = 0;
frame_data.extradata_addr = handle->device_addr +
frame_data.extradata_addr = binfo->smem.device_addr +
output_buf->buffer_size;
frame_data.buffer_type = HAL_BUFFER_OUTPUT;
frame_data.extradata_size = extra_buf ?
@ -1980,17 +1978,17 @@ static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
phys_addr_t dev_addr)
{
struct internal_buf *binfo;
struct msm_smem *handle;
struct msm_smem *smem;
bool found = false;
mutex_lock(&inst->outputbufs.lock);
list_for_each_entry(binfo, &inst->outputbufs.list, list) {
handle = binfo->handle;
if (handle && dev_addr == handle->device_addr) {
smem = &binfo->smem;
if (smem && dev_addr == smem->device_addr) {
if (binfo->buffer_ownership == DRIVER) {
dprintk(VIDC_ERR,
"FW returned same buffer: %pa\n",
&dev_addr);
"FW returned same buffer: %x\n",
dev_addr);
break;
}
binfo->buffer_ownership = DRIVER;
@ -2002,8 +2000,8 @@ static int handle_multi_stream_buffers(struct msm_vidc_inst *inst,
if (!found) {
dprintk(VIDC_ERR,
"Failed to find output buffer in queued list: %pa\n",
&dev_addr);
"Failed to find output buffer in queued list: %x\n",
dev_addr);
}
return 0;
@ -3069,9 +3067,8 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
enum hal_buffer buffer_type)
{
int rc = 0;
struct msm_smem *handle;
struct internal_buf *binfo;
u32 smem_flags = 0, buffer_size;
struct internal_buf *binfo = NULL;
u32 smem_flags = SMEM_UNCACHED, buffer_size;
struct hal_buffer_requirements *output_buf, *extradata_buf;
int i;
struct hfi_device *hdev;
@ -3117,33 +3114,25 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
if (output_buf->buffer_size) {
for (i = 0; i < output_buf->buffer_count_actual;
i++) {
handle = msm_comm_smem_alloc(inst,
buffer_size, 1, smem_flags,
buffer_type, 0);
if (!handle) {
dprintk(VIDC_ERR,
"Failed to allocate output memory\n");
rc = -ENOMEM;
goto err_no_mem;
}
rc = msm_comm_smem_cache_operations(inst,
handle, SMEM_CACHE_CLEAN);
if (rc) {
dprintk(VIDC_WARN,
"Failed to clean cache may cause undefined behavior\n");
}
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
dprintk(VIDC_ERR, "Out of memory\n");
rc = -ENOMEM;
goto fail_kzalloc;
}
binfo->handle = handle;
rc = msm_comm_smem_alloc(inst,
buffer_size, 1, smem_flags,
buffer_type, 0, &binfo->smem);
if (rc) {
dprintk(VIDC_ERR,
"Failed to allocate output memory\n");
goto err_no_mem;
}
binfo->buffer_type = buffer_type;
binfo->buffer_ownership = DRIVER;
dprintk(VIDC_DBG, "Output buffer address: %pa\n",
&handle->device_addr);
dprintk(VIDC_DBG, "Output buffer address: %#x\n",
binfo->smem.device_addr);
if (inst->buffer_mode_set[CAPTURE_PORT] ==
HAL_BUFFER_MODE_STATIC) {
@ -3154,9 +3143,9 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
buffer_info.buffer_type = buffer_type;
buffer_info.num_buffers = 1;
buffer_info.align_device_addr =
handle->device_addr;
binfo->smem.device_addr;
buffer_info.extradata_addr =
handle->device_addr +
binfo->smem.device_addr +
output_buf->buffer_size;
if (extradata_buf)
buffer_info.extradata_size =
@ -3177,10 +3166,10 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
}
return rc;
fail_set_buffers:
msm_comm_smem_free(inst, &binfo->smem);
err_no_mem:
kfree(binfo);
fail_kzalloc:
msm_comm_smem_free(inst, handle);
err_no_mem:
return rc;
}
@ -3258,10 +3247,6 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst,
mutex_lock(&buf_list->lock);
list_for_each_entry(buf, &buf_list->list, list) {
if (!buf->handle) {
reused = false;
break;
}
if (buf->buffer_type != buffer_type)
continue;
@ -3278,7 +3263,7 @@ static bool reuse_internal_buffers(struct msm_vidc_inst *inst,
&& buffer_type != HAL_BUFFER_INTERNAL_PERSIST_1) {
rc = set_internal_buf_on_fw(inst, buffer_type,
buf->handle, true);
&buf->smem, true);
if (rc) {
dprintk(VIDC_ERR,
"%s: session_set_buffers failed\n",
@ -3299,9 +3284,8 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
struct hal_buffer_requirements *internal_bufreq,
struct msm_vidc_list *buf_list)
{
struct msm_smem *handle;
struct internal_buf *binfo;
u32 smem_flags = 0;
u32 smem_flags = SMEM_UNCACHED;
int rc = 0;
int i = 0;
@ -3315,27 +3299,25 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
smem_flags |= SMEM_SECURE;
for (i = 0; i < internal_bufreq->buffer_count_actual; i++) {
handle = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
1, smem_flags, internal_bufreq->buffer_type, 0);
if (!handle) {
dprintk(VIDC_ERR,
"Failed to allocate scratch memory\n");
rc = -ENOMEM;
goto err_no_mem;
}
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
dprintk(VIDC_ERR, "Out of memory\n");
rc = -ENOMEM;
goto fail_kzalloc;
}
rc = msm_comm_smem_alloc(inst, internal_bufreq->buffer_size,
1, smem_flags, internal_bufreq->buffer_type,
0, &binfo->smem);
if (rc) {
dprintk(VIDC_ERR,
"Failed to allocate scratch memory\n");
goto err_no_mem;
}
binfo->handle = handle;
binfo->buffer_type = internal_bufreq->buffer_type;
rc = set_internal_buf_on_fw(inst, internal_bufreq->buffer_type,
handle, false);
&binfo->smem, false);
if (rc)
goto fail_set_buffers;
@ -3346,10 +3328,10 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
return rc;
fail_set_buffers:
msm_comm_smem_free(inst, &binfo->smem);
err_no_mem:
kfree(binfo);
fail_kzalloc:
msm_comm_smem_free(inst, handle);
err_no_mem:
return rc;
}
@ -3578,7 +3560,7 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
struct vidc_frame_data data = {0};
struct hfi_device *hdev = NULL;
struct eos_buf *binfo = NULL;
u32 smem_flags = 0;
u32 smem_flags = SMEM_UNCACHED;
if (inst->state != MSM_VIDC_START_DONE) {
dprintk(VIDC_DBG,
@ -3603,8 +3585,16 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
if (inst->flags & VIDC_SECURE)
smem_flags |= SMEM_SECURE;
msm_comm_smem_alloc(inst,
SZ_4K, 1, smem_flags, HAL_BUFFER_INPUT, 0);
rc = msm_comm_smem_alloc(inst,
SZ_4K, 1, smem_flags,
HAL_BUFFER_INPUT, 0, &binfo->smem);
if (rc) {
kfree(binfo);
dprintk(VIDC_ERR,
"Failed to allocate output memory\n");
rc = -ENOMEM;
break;
}
mutex_lock(&inst->eosbufs.lock);
list_add_tail(&binfo->list, &inst->eosbufs.list);
@ -4177,7 +4167,7 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst)
}
mutex_lock(&inst->outputbufs.lock);
list_for_each_entry_safe(buf, dummy, &inst->outputbufs.list, list) {
handle = buf->handle;
handle = &buf->smem;
if (!handle) {
dprintk(VIDC_ERR, "%s - invalid handle\n", __func__);
goto exit;
@ -4203,7 +4193,7 @@ int msm_comm_release_output_buffers(struct msm_vidc_inst *inst)
}
list_del(&buf->list);
msm_comm_smem_free(inst, buf->handle);
msm_comm_smem_free(inst, &buf->smem);
kfree(buf);
}
@ -4232,13 +4222,8 @@ static enum hal_buffer scratch_buf_sufficient(struct msm_vidc_inst *inst,
mutex_lock(&inst->scratchbufs.lock);
list_for_each_entry(buf, &inst->scratchbufs.list, list) {
if (!buf->handle) {
dprintk(VIDC_ERR, "%s: invalid buf handle\n", __func__);
mutex_unlock(&inst->scratchbufs.lock);
goto not_sufficient;
}
if (buf->buffer_type == buffer_type &&
buf->handle->size >= bufreq->buffer_size)
buf->smem.size >= bufreq->buffer_size)
count++;
}
mutex_unlock(&inst->scratchbufs.lock);
@ -4297,13 +4282,7 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
mutex_lock(&inst->scratchbufs.lock);
list_for_each_entry_safe(buf, dummy, &inst->scratchbufs.list, list) {
if (!buf->handle) {
dprintk(VIDC_ERR, "%s - buf->handle NULL\n", __func__);
rc = -EINVAL;
goto exit;
}
handle = buf->handle;
handle = &buf->smem;
buffer_info.buffer_size = handle->size;
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
@ -4335,11 +4314,10 @@ int msm_comm_release_scratch_buffers(struct msm_vidc_inst *inst,
continue;
list_del(&buf->list);
msm_comm_smem_free(inst, buf->handle);
msm_comm_smem_free(inst, handle);
kfree(buf);
}
exit:
mutex_unlock(&inst->scratchbufs.lock);
return rc;
}
@ -4357,9 +4335,9 @@ void msm_comm_release_eos_buffers(struct msm_vidc_inst *inst)
mutex_lock(&inst->eosbufs.lock);
list_for_each_entry_safe(buf, next, &inst->eosbufs.list, list) {
list_del(&buf->list);
msm_comm_smem_free(inst, &buf->smem);
kfree(buf);
}
INIT_LIST_HEAD(&inst->eosbufs.list);
mutex_unlock(&inst->eosbufs.lock);
}
@ -4394,7 +4372,7 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
buf = list_entry(ptr, struct internal_buf, list);
handle = buf->handle;
handle = &buf->smem;
buffer_info.buffer_size = handle->size;
buffer_info.buffer_type = buf->buffer_type;
buffer_info.num_buffers = 1;
@ -4420,7 +4398,7 @@ int msm_comm_release_persist_buffers(struct msm_vidc_inst *inst)
mutex_lock(&inst->persistbufs.lock);
}
list_del(&buf->list);
msm_comm_smem_free(inst, buf->handle);
msm_comm_smem_free(inst, handle);
kfree(buf);
}
mutex_unlock(&inst->persistbufs.lock);
@ -5158,19 +5136,20 @@ int msm_comm_kill_session(struct msm_vidc_inst *inst)
return rc;
}
struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
size_t size, u32 align, u32 flags,
enum hal_buffer buffer_type, int map_kernel)
int msm_comm_smem_alloc(struct msm_vidc_inst *inst,
size_t size, u32 align, u32 flags, enum hal_buffer buffer_type,
int map_kernel, struct msm_smem *smem)
{
struct msm_smem *m = NULL;
int rc = 0;
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
return NULL;
return -EINVAL;
}
m = msm_smem_alloc(inst->mem_client, size, align,
flags, buffer_type, map_kernel);
return m;
rc = msm_smem_alloc(size, align, flags, buffer_type, map_kernel,
&(inst->core->resources), inst->session_type,
smem);
return rc;
}
void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
@ -5180,7 +5159,7 @@ void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
"%s: invalid params: %pK %pK\n", __func__, inst, mem);
return;
}
msm_smem_free(inst->mem_client, mem);
msm_smem_free(mem);
}
int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
@ -5191,28 +5170,8 @@ int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
"%s: invalid params: %pK %pK\n", __func__, inst, mem);
return -EINVAL;
}
return msm_smem_cache_operations(inst->mem_client, mem, cache_ops);
}
struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
int fd, u32 offset, enum hal_buffer buffer_type)
{
struct msm_smem *m = NULL;
if (!inst || !inst->core) {
dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
return NULL;
}
if (inst->state == MSM_VIDC_CORE_INVALID) {
dprintk(VIDC_ERR, "Core in Invalid state, returning from %s\n",
__func__);
return NULL;
}
m = msm_smem_user_to_kernel(inst->mem_client,
fd, offset, buffer_type);
return m;
return msm_smem_cache_operations(mem->dma_buf, mem->offset,
mem->size, cache_ops);
}
void msm_vidc_fw_unload_handler(struct work_struct *work)
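
msm_comm_smem_cache_operations() now forwards the dma_buf plus an offset/size range instead of an ION client and handle. As a hedged illustration of the idea only (the real body sits in the suppressed msm_smem.c diff and may use a ranged/partial variant), cache maintenance on a dma-buf is normally expressed through the CPU-access hooks:

/* Sketch: map the driver's cache ops onto the generic dma-buf CPU-access calls. */
static int example_cache_op(struct dma_buf *dbuf, enum smem_cache_ops op)
{
	switch (op) {
	case SMEM_CACHE_CLEAN:
		/* CPU has written; write back so the device sees the data */
		return dma_buf_end_cpu_access(dbuf, DMA_TO_DEVICE);
	case SMEM_CACHE_INVALIDATE:
		/* device has written; drop stale CPU cache lines before reading */
		return dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE);
	default:
		return -EINVAL;
	}
}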

@ -72,14 +72,13 @@ void msm_comm_session_clean(struct msm_vidc_inst *inst);
int msm_comm_kill_session(struct msm_vidc_inst *inst);
enum multi_stream msm_comm_get_stream_output_mode(struct msm_vidc_inst *inst);
enum hal_buffer msm_comm_get_hal_output_buffer(struct msm_vidc_inst *inst);
struct msm_smem *msm_comm_smem_alloc(struct msm_vidc_inst *inst,
int msm_comm_smem_alloc(struct msm_vidc_inst *inst,
size_t size, u32 align, u32 flags,
enum hal_buffer buffer_type, int map_kernel);
enum hal_buffer buffer_type, int map_kernel,
struct msm_smem *smem);
void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem);
int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
struct msm_smem *mem, enum smem_cache_ops cache_ops);
struct msm_smem *msm_comm_smem_user_to_kernel(struct msm_vidc_inst *inst,
int fd, u32 offset, enum hal_buffer buffer_type);
enum hal_video_codec get_hal_codec(int fourcc);
enum hal_domain get_hal_domain(int session_type);
int msm_comm_check_core_init(struct msm_vidc_core *core);
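
The allocation helper now returns an error code and fills in a caller-owned struct msm_smem instead of handing back a pointer from an ION client. A short usage sketch of the new pattern follows; it is illustrative only, with inst, buffer_size and the internal_buf bookkeeping mirroring the call sites changed in this commit.

/* Sketch: allocate into an embedded struct msm_smem and free via the same struct. */
static int example_alloc_output_buf(struct msm_vidc_inst *inst, u32 buffer_size)
{
	struct internal_buf *binfo;
	int rc;

	binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
	if (!binfo)
		return -ENOMEM;

	rc = msm_comm_smem_alloc(inst, buffer_size, 1, SMEM_UNCACHED,
				 HAL_BUFFER_OUTPUT, 0, &binfo->smem);
	if (rc) {
		kfree(binfo);
		return rc;
	}

	/* the device address now lives in the embedded struct, not in a *handle */
	dprintk(VIDC_DBG, "output buffer address: %#x\n",
		binfo->smem.device_addr);

	msm_comm_smem_free(inst, &binfo->smem);
	kfree(binfo);
	return 0;
}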

@ -140,7 +140,7 @@ struct eos_buf {
struct internal_buf {
struct list_head list;
enum hal_buffer buffer_type;
struct msm_smem *handle;
struct msm_smem smem;
enum buffer_owner buffer_ownership;
};
@ -282,7 +282,6 @@ struct msm_vidc_inst {
struct msm_vidc_list eosbufs;
struct msm_vidc_list registeredbufs;
struct buffer_requirements buff_req;
void *mem_client;
struct v4l2_ctrl_handler ctrl_handler;
struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
struct v4l2_ctrl **cluster;
@ -357,7 +356,7 @@ struct buffer_info {
int size[VIDEO_MAX_PLANES];
unsigned long uvaddr[VIDEO_MAX_PLANES];
phys_addr_t device_addr[VIDEO_MAX_PLANES];
struct msm_smem *handle[VIDEO_MAX_PLANES];
struct msm_smem smem[VIDEO_MAX_PLANES];
enum v4l2_memory memory;
u32 v4l2_index;
bool pending_deletion;
@ -382,20 +381,25 @@ int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
struct buffer_info *binfo);
void msm_comm_handle_thermal_event(void);
void *msm_smem_new_client(enum smem_type mtype,
void *platform_resources, enum session_type stype);
struct msm_smem *msm_smem_alloc(void *clt, size_t size, u32 align, u32 flags,
enum hal_buffer buffer_type, int map_kernel);
void msm_smem_free(void *clt, struct msm_smem *mem);
void msm_smem_delete_client(void *clt);
int msm_smem_cache_operations(void *clt, struct msm_smem *mem,
enum smem_cache_ops);
struct msm_smem *msm_smem_user_to_kernel(void *clt, int fd, u32 offset,
enum hal_buffer buffer_type);
struct context_bank_info *msm_smem_get_context_bank(void *clt,
bool is_secure, enum hal_buffer buffer_type);
int msm_smem_alloc(size_t size, u32 align, u32 flags,
enum hal_buffer buffer_type, int map_kernel,
void *res, u32 session_type, struct msm_smem *smem);
int msm_smem_free(struct msm_smem *mem);
int msm_smem_cache_operations(struct dma_buf *dbuf,
enum smem_cache_ops, unsigned long offset, unsigned long size);
struct context_bank_info *msm_smem_get_context_bank(u32 session_type,
bool is_secure, struct msm_vidc_platform_resources *res,
enum hal_buffer buffer_type);
int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
struct dma_buf *msm_smem_get_dma_buf(int fd);
void msm_smem_put_dma_buf(void *dma_buf);
bool msm_smem_compare_buffers(int fd, void *dma_buf);
struct msm_smem *msm_smem_user_to_kernel(struct msm_vidc_inst *inst,
int fd, u32 offset,
u32 size, enum hal_buffer buffer_type);
void msm_vidc_fw_unload_handler(struct work_struct *work);
bool msm_smem_compare_buffers(void *clt, int fd, void *priv);
/* XXX: normally should be in msm_vidc.h, but that's meant for public APIs,
* whereas this is private
*/
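
msm_smem_get_dma_buf()/msm_smem_put_dma_buf(), declared a few lines above, replace the old ION-handle lookups. Their bodies are in the suppressed msm_smem.c diff; the sketch below only shows the obvious wrappers around the core dma-buf reference counting they stand in for, with the error-handling style assumed.

/* Sketch: thin fd <-> dma_buf helpers on top of the core refcounting calls. */
struct dma_buf *example_get_dma_buf(int fd)
{
	struct dma_buf *dbuf = dma_buf_get(fd);

	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(VIDC_ERR, "Failed to get dma_buf for fd %d\n", fd);
		return NULL;
	}
	return dbuf;
}

void example_put_dma_buf(void *dma_buf)
{
	if (!dma_buf)
		return;
	dma_buf_put((struct dma_buf *)dma_buf);
}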

@ -1378,24 +1378,24 @@ int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
dprintk(VIDC_ERR, "scratch buffer list:\n");
list_for_each_entry(buf, &inst->scratchbufs.list, list)
dprintk(VIDC_ERR, "type: %d addr: %pa size: %u\n",
buf->buffer_type, &buf->handle->device_addr,
buf->handle->size);
buf->buffer_type, &buf->smem.device_addr,
buf->smem.size);
mutex_unlock(&inst->scratchbufs.lock);
mutex_lock(&inst->persistbufs.lock);
dprintk(VIDC_ERR, "persist buffer list:\n");
list_for_each_entry(buf, &inst->persistbufs.list, list)
dprintk(VIDC_ERR, "type: %d addr: %pa size: %u\n",
buf->buffer_type, &buf->handle->device_addr,
buf->handle->size);
buf->buffer_type, &buf->smem.device_addr,
buf->smem.size);
mutex_unlock(&inst->persistbufs.lock);
mutex_lock(&inst->outputbufs.lock);
dprintk(VIDC_ERR, "dpb buffer list:\n");
list_for_each_entry(buf, &inst->outputbufs.list, list)
dprintk(VIDC_ERR, "type: %d addr: %pa size: %u\n",
buf->buffer_type, &buf->handle->device_addr,
buf->handle->size);
buf->buffer_type, &buf->smem.device_addr,
buf->smem.size);
mutex_unlock(&inst->outputbufs.lock);
}
core->smmu_fault_handled = true;

@ -570,17 +570,18 @@ static int __smem_alloc(struct venus_hfi_device *dev,
struct vidc_mem_addr *mem, u32 size, u32 align,
u32 flags, u32 usage)
{
struct msm_smem *alloc = NULL;
struct msm_smem *alloc = &mem->mem_data;
int rc = 0;
if (!dev || !dev->hal_client || !mem || !size) {
if (!dev || !mem || !size) {
dprintk(VIDC_ERR, "Invalid Params\n");
return -EINVAL;
}
dprintk(VIDC_INFO, "start to alloc size: %d, flags: %d\n", size, flags);
alloc = msm_smem_alloc(dev->hal_client, size, align, flags, usage, 1);
if (!alloc) {
rc = msm_smem_alloc(size, align, flags, usage, 1, (void *)dev->res,
MSM_VIDC_UNKNOWN, alloc);
if (rc) {
dprintk(VIDC_ERR, "Alloc failed\n");
rc = -ENOMEM;
goto fail_smem_alloc;
@ -589,7 +590,7 @@ static int __smem_alloc(struct venus_hfi_device *dev,
dprintk(VIDC_DBG, "%s: ptr = %pK, size = %d\n",
__func__,
alloc->kvaddr, size);
rc = msm_smem_cache_operations(dev->hal_client, alloc,
rc = msm_smem_cache_operations(alloc->dma_buf, 0, alloc->size,
SMEM_CACHE_CLEAN);
if (rc) {
dprintk(VIDC_WARN, "Failed to clean cache\n");
@ -597,7 +598,6 @@ static int __smem_alloc(struct venus_hfi_device *dev,
}
mem->mem_size = alloc->size;
mem->mem_data = alloc;
mem->align_virtual_addr = alloc->kvaddr;
mem->align_device_addr = alloc->device_addr;
return rc;
@ -612,7 +612,7 @@ static void __smem_free(struct venus_hfi_device *dev, struct msm_smem *mem)
return;
}
msm_smem_free(dev->hal_client, mem);
msm_smem_free(mem);
}
static void __write_register(struct venus_hfi_device *device,
@ -1750,7 +1750,7 @@ static void __interface_queues_release(struct venus_hfi_device *device)
unsigned long mem_map_table_base_addr;
struct context_bank_info *cb;
if (device->qdss.mem_data) {
if (device->qdss.align_virtual_addr) {
qdss = (struct hfi_mem_map_table *)
device->qdss.align_virtual_addr;
qdss->mem_map_num_entries = num_entries;
@ -1767,8 +1767,8 @@ static void __interface_queues_release(struct venus_hfi_device *device)
}
mem_map = (struct hfi_mem_map *)(qdss + 1);
cb = msm_smem_get_context_bank(device->hal_client,
false, HAL_BUFFER_INTERNAL_CMD_QUEUE);
cb = msm_smem_get_context_bank(MSM_VIDC_UNKNOWN,
false, device->res, HAL_BUFFER_INTERNAL_CMD_QUEUE);
for (i = 0; cb && i < num_entries; i++) {
iommu_unmap(cb->mapping->domain,
@ -1776,37 +1776,30 @@ static void __interface_queues_release(struct venus_hfi_device *device)
mem_map[i].size);
}
__smem_free(device, device->qdss.mem_data);
__smem_free(device, &device->qdss.mem_data);
}
__smem_free(device, device->iface_q_table.mem_data);
__smem_free(device, device->sfr.mem_data);
__smem_free(device, &device->iface_q_table.mem_data);
__smem_free(device, &device->sfr.mem_data);
for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
device->iface_queues[i].q_hdr = NULL;
device->iface_queues[i].q_array.mem_data = NULL;
device->iface_queues[i].q_array.align_virtual_addr = NULL;
device->iface_queues[i].q_array.align_device_addr = 0;
}
device->iface_q_table.mem_data = NULL;
device->iface_q_table.align_virtual_addr = NULL;
device->iface_q_table.align_device_addr = 0;
device->qdss.mem_data = NULL;
device->qdss.align_virtual_addr = NULL;
device->qdss.align_device_addr = 0;
device->sfr.mem_data = NULL;
device->sfr.align_virtual_addr = NULL;
device->sfr.align_device_addr = 0;
device->mem_addr.mem_data = NULL;
device->mem_addr.align_virtual_addr = NULL;
device->mem_addr.align_device_addr = 0;
msm_smem_delete_client(device->hal_client);
device->hal_client = NULL;
}
static int __get_qdss_iommu_virtual_addr(struct venus_hfi_device *dev,
@ -1900,7 +1893,7 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
mem_addr = &dev->mem_addr;
if (!is_iommu_present(dev->res))
fw_bias = dev->hal_data->firmware_base;
rc = __smem_alloc(dev, mem_addr, q_size, 1, 0,
rc = __smem_alloc(dev, mem_addr, q_size, 1, SMEM_UNCACHED,
HAL_BUFFER_INTERNAL_CMD_QUEUE);
if (rc) {
dprintk(VIDC_ERR, "iface_q_table_alloc_fail\n");
@ -1921,7 +1914,6 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
iface_q->q_array.align_virtual_addr =
mem_addr->align_virtual_addr + offset;
iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
iface_q->q_array.mem_data = NULL;
offset += iface_q->q_array.mem_size;
iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
dev->iface_q_table.align_virtual_addr, i);
@ -1930,7 +1922,7 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
if ((msm_vidc_fw_debug_mode & HFI_DEBUG_MODE_QDSS) && num_entries) {
rc = __smem_alloc(dev, mem_addr,
ALIGNED_QDSS_SIZE, 1, 0,
ALIGNED_QDSS_SIZE, 1, SMEM_UNCACHED,
HAL_BUFFER_INTERNAL_CMD_QUEUE);
if (rc) {
dprintk(VIDC_WARN,
@ -1947,7 +1939,7 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
}
rc = __smem_alloc(dev, mem_addr,
ALIGNED_SFR_SIZE, 1, 0,
ALIGNED_SFR_SIZE, 1, SMEM_UNCACHED,
HAL_BUFFER_INTERNAL_CMD_QUEUE);
if (rc) {
dprintk(VIDC_WARN, "sfr_alloc_fail: SFR not will work\n");
@ -2012,7 +2004,7 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
&dev->iface_q_table.align_device_addr);
}
if (dev->qdss.mem_data) {
if (dev->qdss.align_virtual_addr) {
qdss = (struct hfi_mem_map_table *)dev->qdss.align_virtual_addr;
qdss->mem_map_num_entries = num_entries;
mem_map_table_base_addr = dev->qdss.align_device_addr +
@ -2027,8 +2019,8 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
}
mem_map = (struct hfi_mem_map *)(qdss + 1);
cb = msm_smem_get_context_bank(dev->hal_client, false,
HAL_BUFFER_INTERNAL_CMD_QUEUE);
cb = msm_smem_get_context_bank(MSM_VIDC_UNKNOWN, false,
dev->res, HAL_BUFFER_INTERNAL_CMD_QUEUE);
if (!cb) {
dprintk(VIDC_ERR,
@ -2040,8 +2032,7 @@ static int __interface_queues_init(struct venus_hfi_device *dev)
if (rc) {
dprintk(VIDC_ERR,
"IOMMU mapping failed, Freeing qdss memdata\n");
__smem_free(dev, dev->qdss.mem_data);
dev->qdss.mem_data = NULL;
__smem_free(dev, &dev->qdss.mem_data);
dev->qdss.align_virtual_addr = NULL;
dev->qdss.align_device_addr = 0;
}
@ -2195,28 +2186,14 @@ static int venus_hfi_core_init(void *device)
__set_registers(dev);
if (!dev->hal_client) {
dev->hal_client = msm_smem_new_client(
SMEM_ION, dev->res, MSM_VIDC_UNKNOWN);
if (dev->hal_client == NULL) {
dprintk(VIDC_ERR, "Failed to alloc ION_Client\n");
rc = -ENODEV;
goto err_core_init;
}
dprintk(VIDC_DBG, "Dev_Virt: %pa, Reg_Virt: %pK\n",
&dev->hal_data->firmware_base,
dev->hal_data->register_base);
dprintk(VIDC_DBG, "Dev_Virt: %pa, Reg_Virt: %pK\n",
&dev->hal_data->firmware_base,
dev->hal_data->register_base);
rc = __interface_queues_init(dev);
if (rc) {
dprintk(VIDC_ERR, "failed to init queues\n");
rc = -ENOMEM;
goto err_core_init;
}
} else {
dprintk(VIDC_ERR, "hal_client exists\n");
rc = -EEXIST;
rc = __interface_queues_init(dev);
if (rc) {
dprintk(VIDC_ERR, "failed to init queues\n");
rc = -ENOMEM;
goto err_core_init;
}

@ -125,7 +125,7 @@ struct vidc_mem_addr {
phys_addr_t align_device_addr;
u8 *align_virtual_addr;
u32 mem_size;
struct msm_smem *mem_data;
struct msm_smem mem_data;
};
struct vidc_iface_q_info {
@ -235,7 +235,6 @@ struct venus_hfi_device {
struct vidc_mem_addr sfr;
struct vidc_mem_addr mem_addr;
struct vidc_iface_q_info iface_queues[VIDC_IFACEQ_NUMQ];
struct smem_client *hal_client;
struct hal_data *hal_data;
struct workqueue_struct *vidc_workq;
struct workqueue_struct *venus_pm_workq;

@ -27,8 +27,10 @@ enum smem_type {
};
enum smem_prop {
SMEM_CACHED,
SMEM_SECURE,
SMEM_UNCACHED = 0x1,
SMEM_CACHED = 0x2,
SMEM_SECURE = 0x4,
SMEM_ADSP = 0x8,
};
/* NOTE: if you change this enum you MUST update the
@ -58,6 +60,7 @@ struct dma_mapping_info {
struct sg_table *table;
struct dma_buf_attachment *attach;
struct dma_buf *buf;
void *cb_info;
};
struct msm_smem {
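
The excerpt cuts off at the struct definition. Purely as a reading aid, here is a plausible reconstruction of struct msm_smem assembled from the fields this commit actually touches (fd, dma_buf, kvaddr, device_addr, offset, size, buffer_type, and the extended dma_mapping_info above); the exact field types and the refcount member are assumptions, not the verbatim header.

struct msm_smem {
	u32 refcount;                    /* assumed: shared-fd mappings bump a refcount */
	int fd;                          /* userspace fd backing the buffer */
	void *dma_buf;                   /* struct dma_buf * of the imported buffer */
	void *kvaddr;                    /* kernel mapping when map_kernel was requested */
	u32 device_addr;                 /* IOVA programmed into the video core */
	unsigned int offset;
	unsigned int size;
	unsigned long flags;
	enum hal_buffer buffer_type;
	struct dma_mapping_info mapping_info;
};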

@ -209,7 +209,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_ion_ops,
flags, map_kernel),
TP_STRUCT__entry(
__field(char *, buffer_op)
__string(buffer_op, buffer_op)
__field(u32, buffer_type)
__field(u32, heap_mask)
__field(u32, size)
@ -219,7 +219,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_ion_ops,
),
TP_fast_assign(
__entry->buffer_op = buffer_op;
__assign_str(buffer_op, buffer_op);
__entry->buffer_type = buffer_type;
__entry->heap_mask = heap_mask;
__entry->size = size;
@ -230,7 +230,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_ion_ops,
TP_printk(
"%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
__entry->buffer_op,
__get_str(buffer_op),
__entry->buffer_type,
__entry->heap_mask,
__entry->size,
@ -256,6 +256,62 @@ DEFINE_EVENT(msm_smem_buffer_ion_ops, msm_smem_buffer_ion_op_end,
TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
flags, map_kernel)
);
DECLARE_EVENT_CLASS(msm_smem_buffer_dma_ops,
TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
size_t size, u32 align, u32 flags, int map_kernel),
TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
flags, map_kernel),
TP_STRUCT__entry(
__string(buffer_op, buffer_op)
__field(u32, buffer_type)
__field(u32, heap_mask)
__field(u32, size)
__field(u32, align)
__field(u32, flags)
__field(int, map_kernel)
),
TP_fast_assign(
__assign_str(buffer_op, buffer_op);
__entry->buffer_type = buffer_type;
__entry->heap_mask = heap_mask;
__entry->size = size;
__entry->align = align;
__entry->flags = flags;
__entry->map_kernel = map_kernel;
),
TP_printk(
"%s, buffer_type : 0x%x, heap_mask : 0x%x, size : 0x%x, align : 0x%x, flags : 0x%x, map_kernel : %d",
__get_str(buffer_op),
__entry->buffer_type,
__entry->heap_mask,
__entry->size,
__entry->align,
__entry->flags,
__entry->map_kernel)
);
DEFINE_EVENT(msm_smem_buffer_dma_ops, msm_smem_buffer_dma_op_start,
TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
size_t size, u32 align, u32 flags, int map_kernel),
TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
flags, map_kernel)
);
DEFINE_EVENT(msm_smem_buffer_dma_ops, msm_smem_buffer_dma_op_end,
TP_PROTO(char *buffer_op, u32 buffer_type, u32 heap_mask,
size_t size, u32 align, u32 flags, int map_kernel),
TP_ARGS(buffer_op, buffer_type, heap_mask, size, align,
flags, map_kernel)
);
DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops,
@ -266,7 +322,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops,
TP_ARGS(buffer_op, domain_num, partition_num, align, iova, buffer_size),
TP_STRUCT__entry(
__field(char *, buffer_op)
__string(buffer_op, buffer_op)
__field(int, domain_num)
__field(int, partition_num)
__field(unsigned long, align)
@ -275,7 +331,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops,
),
TP_fast_assign(
__entry->buffer_op = buffer_op;
__assign_str(buffer_op, buffer_op);
__entry->domain_num = domain_num;
__entry->partition_num = partition_num;
__entry->align = align;
@ -285,7 +341,7 @@ DECLARE_EVENT_CLASS(msm_smem_buffer_iommu_ops,
TP_printk(
"%s, domain : %d, partition : %d, align : %lx, iova : 0x%lx, buffer_size=%lx",
__entry->buffer_op,
__get_str(buffer_op),
__entry->domain_num,
__entry->partition_num,
__entry->align,
