MLK-21448-3: [VPU]: 4.19 integration on 8QXP/QM
author Zhou Peng <eagle.zhou@nxp.com>
Mon, 15 Apr 2019 04:12:35 +0000 (12:12 +0800)
committer Leonard Crestez <leonard.crestez@nxp.com>
Wed, 17 Apr 2019 23:51:34 +0000 (02:51 +0300)
Add encoder driver: drivers/mxc/vpu_windsor

Signed-off-by: Zhou Peng <eagle.zhou@nxp.com>
14 files changed:
drivers/mxc/vpu_windsor/Kconfig [new file with mode: 0644]
drivers/mxc/vpu_windsor/Makefile [new file with mode: 0644]
drivers/mxc/vpu_windsor/mediasys_types.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_b0.c [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_b0.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_config.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_ctrl.c [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_ctrl.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_mem.c [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_mem.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_rpc.c [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_encoder_rpc.h [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_event_msg.c [new file with mode: 0644]
drivers/mxc/vpu_windsor/vpu_event_msg.h [new file with mode: 0644]

diff --git a/drivers/mxc/vpu_windsor/Kconfig b/drivers/mxc/vpu_windsor/Kconfig
new file mode 100644 (file)
index 0000000..11562a6
--- /dev/null
@@ -0,0 +1,25 @@
+#
+# Codec configuration
+#
+
+menu "MXC VPU(Video Processing Unit) WINDSOR ENCODER support"
+
+config MXC_VPU_WINDSOR
+         tristate "Support for MXC VPU (Video Processing Unit) WINDSOR ENCODER"
+         depends on MEDIA_SUPPORT
+         depends on VIDEO_DEV
+         depends on VIDEO_V4L2
+         select VIDEOBUF2_DMA_CONTIG
+         select VIDEOBUF2_VMALLOC
+         default y
+       ---help---
+         The VPU Windsor device provides hardware video encoding (H.264, etc.).
+
+config MXC_VPU_WINDSOR_DEBUG
+       bool "MXC VPU WINDSOR ENCODER debugging"
+       depends on MXC_VPU_WINDSOR != n
+       help
+         This is an option for the developers; most people should
+         say N here.  This enables MXC VPU WINDSOR Encoder driver debugging.
+
+endmenu
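
The options above expose a V4L2 memory-to-memory encoder: once CONFIG_MXC_VPU_WINDSOR is enabled, the driver registers a video device node that user space drives through the standard V4L2 ioctls implemented later in this patch (querycap, enum_fmt, s_fmt, qbuf/dqbuf, streamon). A minimal user-space probing sketch, assuming a /dev/video0 node (the actual index is board-specific) and intended purely as an illustration, not as code from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);	/* node index is an assumption */

	if (fd < 0)
		return 1;
	memset(&cap, 0, sizeof(cap));
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver: %s, device_caps: 0x%x\n",
		       cap.driver, cap.device_caps);
	close(fd);
	return 0;
}
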
diff --git a/drivers/mxc/vpu_windsor/Makefile b/drivers/mxc/vpu_windsor/Makefile
new file mode 100644 (file)
index 0000000..14bbfe6
--- /dev/null
@@ -0,0 +1,15 @@
+##
+## Makefile for the VPU Windsor encoder driver
+##
+
+EXTRA_CFLAGS += $(DEFINES)
+
+obj-$(CONFIG_MXC_VPU_WINDSOR) += vpu-windsor.o
+vpu-windsor-objs = vpu_encoder_b0.o \
+       vpu_encoder_ctrl.o \
+       vpu_event_msg.o \
+       vpu_encoder_mem.o \
+       vpu_encoder_rpc.o
+
+clean:
+       rm -rf $(vpu-windsor-objs)
diff --git a/drivers/mxc/vpu_windsor/mediasys_types.h b/drivers/mxc/vpu_windsor/mediasys_types.h
new file mode 100644 (file)
index 0000000..62a187a
--- /dev/null
@@ -0,0 +1,705 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MEDIASYS_TYPES_H_
+#define _MEDIASYS_TYPES_H_
+
+typedef unsigned int u_int32;
+typedef unsigned char u_int8;
+typedef unsigned long u_int64;
+typedef unsigned int BOOL;
+typedef int int32;
+#define FALSE 0
+#define TRUE 1
+#define VID_API_NUM_STREAMS 8
+#define VID_API_MAX_BUF_PER_STR 3
+#define VID_API_MAX_NUM_MVC_VIEWS 4
+#define MEDIAIP_MAX_NUM_MALONES 2
+#define MEDIAIP_MAX_NUM_MALONE_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_WINDSORS 1
+#define MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_CMD_IRQ_PINS 2
+#define MEDIAIP_MAX_NUM_MSG_IRQ_PINS 1
+#define MEDIAIP_MAX_NUM_TIMER_IRQ_PINS 4
+#define MEDIAIP_MAX_NUM_TIMER_IRQ_SLOTS 4
+#define VID_API_COMMAND_LIMIT    64
+#define VID_API_MESSAGE_LIMIT    256
+
+#define API_CMD_AVAILABLE            0x0
+#define API_CMD_INCOMPLETE           0x1
+#define API_CMD_BUFFER_ERROR         0x2
+#define API_CMD_UNAVAILABLE          0x3
+#define API_MSG_AVAILABLE            0x0
+#define API_MSG_INCOMPLETE           0x1
+#define API_MSG_BUFFER_ERROR         0x2
+#define API_MSG_UNAVAILABLE          0x3
+#define MEDIAIP_ENC_USER_DATA_WORDS  16
+#define MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES 0x6
+#define MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES 0x3
+
+typedef enum {
+       GTB_ENC_CMD_NOOP        = 0x0,
+       GTB_ENC_CMD_STREAM_START,
+       GTB_ENC_CMD_FRAME_ENCODE,
+       GTB_ENC_CMD_FRAME_SKIP,
+       GTB_ENC_CMD_STREAM_STOP,
+       GTB_ENC_CMD_PARAMETER_UPD,
+       GTB_ENC_CMD_TERMINATE,
+       GTB_ENC_CMD_SNAPSHOT,
+       GTB_ENC_CMD_ROLL_SNAPSHOT,
+       GTB_ENC_CMD_LOCK_SCHEDULER,
+       GTB_ENC_CMD_UNLOCK_SCHEDULER,
+       GTB_ENC_CMD_CONFIGURE_CODEC,
+       GTB_ENC_CMD_DEAD_MARK,
+       GTB_ENC_CMD_FIRM_RESET,
+       GTB_ENC_CMD_RESERVED
+} GTB_ENC_CMD;
+
+typedef enum {
+       VID_API_EVENT_UNDEFINED = 0x0,
+       VID_API_ENC_EVENT_RESET_DONE = 0x1,
+       VID_API_ENC_EVENT_START_DONE,
+       VID_API_ENC_EVENT_STOP_DONE,
+       VID_API_ENC_EVENT_TERMINATE_DONE,
+       VID_API_ENC_EVENT_FRAME_INPUT_DONE,
+       VID_API_ENC_EVENT_FRAME_DONE,
+       VID_API_ENC_EVENT_FRAME_RELEASE,
+       VID_API_ENC_EVENT_PARA_UPD_DONE,
+       VID_API_ENC_EVENT_MEM_REQUEST,
+       VID_API_ENC_EVENT_FIRMWARE_XCPT,
+       VID_API_ENC_EVENT_RESERVED
+} ENC_TB_API_ENC_EVENT;
+
+typedef enum {
+       MEDIAIP_ENC_PIC_TYPE_B_FRAME = 0,
+       MEDIAIP_ENC_PIC_TYPE_P_FRAME,
+       MEDIAIP_ENC_PIC_TYPE_I_FRAME,
+       MEDIAIP_ENC_PIC_TYPE_IDR_FRAME,
+       MEDIAIP_ENC_PIC_TYPE_BI_FRAME
+
+} MEDIAIP_ENC_PIC_TYPE, *pMEDIAIP_ENC_PIC_TYPE;
+
+typedef struct {
+       u_int32                   uMemPhysAddr;
+       u_int32                   uMemVirtAddr;
+       u_int32                   uMemSize;
+} MEDIAIP_ENC_MEM_RESOURCE, *pMEDIAIP_ENC_MEM_RESOURCE;
+
+typedef struct {
+       u_int32                    uEncFrmSize;
+       u_int32                    uEncFrmNum;
+       u_int32                    uRefFrmSize;
+       u_int32                    uRefFrmNum;
+       u_int32                    uActBufSize;
+} MEDIAIP_ENC_MEM_REQ_DATA, *pMEDIAIP_ENC_MEM_REQ_DATA;
+
+typedef struct {
+       MEDIAIP_ENC_MEM_RESOURCE  tEncFrameBuffers[MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES];
+       MEDIAIP_ENC_MEM_RESOURCE  tRefFrameBuffers[MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES];
+       MEDIAIP_ENC_MEM_RESOURCE  tActFrameBufferArea;
+} MEDIAIP_ENC_MEM_POOL, *pMEDIAIP_ENC_MEM_POOL;
+
+///////////////////////////////////////////
+// MEDIAIP_ENC_PIC_INFO
+
+typedef struct {
+       u_int32              uFrameID;
+       u_int32              uPicEncodDone;
+       MEDIAIP_ENC_PIC_TYPE ePicType;
+       u_int32              uSkippedFrame;
+       u_int32              uErrorFlag;
+       u_int32              uPSNR;
+       u_int32              uFlushDone;
+       u_int32              uMBy;
+       u_int32              uMBx;
+       u_int32              uFrameSize;
+       u_int32              uFrameEncTtlCycles;
+       u_int32              uFrameEncTtlFrmCycles;
+       u_int32              uFrameEncTtlSlcCycles;
+       u_int32              uFrameEncTtlEncCycles;
+       u_int32              uFrameEncTtlHmeCycles;
+       u_int32              uFrameEncTtlDsaCycles;
+       u_int32              uFrameEncFwCycles;
+       u_int32              uFrameCrc;
+       u_int32              uNumInterrupts_1;
+       u_int32              uNumInterrupts_2;
+       u_int32              uH264POC;
+       u_int32              uRefInfo;
+       u_int32              uPicNum;
+       u_int32              uPicActivity;
+       u_int32              uSceneChange;
+       u_int32              uMBStats;
+       u_int32              uEncCacheCount0;
+       u_int32              uEncCacheCount1;
+       u_int32              uMtlWrStrbCnt;
+       u_int32              uMtlRdStrbCnt;
+       u_int32              uStrBuffWrPtr;
+       u_int32              uDiagnosticEvents;
+
+       u_int32              uProcIaccTotRdCnt;
+       u_int32              uProcDaccTotRdCnt;
+       u_int32              uProcDaccTotWrCnt;
+       u_int32              uProcDaccRegRdCnt;
+       u_int32              uProcDaccRegWrCnt;
+       u_int32              uProcDaccRngRdCnt;
+       u_int32              uProcDaccRngWrCnt;
+
+} MEDIAIP_ENC_PIC_INFO, *pMEDIAIP_ENC_PIC_INFO;
+
+typedef enum {
+       MEDIAIP_PLAYMODE_CONNECTIVITY = 0,
+       MEDIAIP_PLAYMODE_BROADCAST,
+       MEDIAIP_PLAYMODE_BROADCAST_DSS,
+       MEDIAIP_PLAYMODE_LAST = MEDIAIP_PLAYMODE_BROADCAST_DSS
+
+} MEDIA_IP_PLAYMODE;
+
+typedef struct {
+       u_int32 wptr;
+       u_int32 rptr;
+       u_int32 start;
+       u_int32 end;
+
+} BUFFER_DESCRIPTOR_TYPE, *pBUFFER_DESCRIPTOR_TYPE;
+
+typedef struct {
+       u_int32 uWrPtr;
+       u_int32 uRdPtr;
+       u_int32 uStart;
+       u_int32 uEnd;
+       u_int32 uLo;
+       u_int32 uHi;
+
+} MediaIPFW_Video_BufDesc;
+
+typedef struct {
+       u_int32 uCfgCookie;
+
+       u_int32 uNumMalones;
+       u_int32 uMaloneBaseAddress[MEDIAIP_MAX_NUM_MALONES];
+       u_int32 uHifOffset[MEDIAIP_MAX_NUM_MALONES];
+       u_int32 uMaloneIrqPin[MEDIAIP_MAX_NUM_MALONES][MEDIAIP_MAX_NUM_MALONE_IRQ_PINS];
+       u_int32 uMaloneIrqTarget[MEDIAIP_MAX_NUM_MALONES][MEDIAIP_MAX_NUM_MALONE_IRQ_PINS];
+
+       u_int32 uNumWindsors;
+       u_int32 uWindsorBaseAddress[MEDIAIP_MAX_NUM_WINDSORS];
+       u_int32 uWindsorIrqPin[MEDIAIP_MAX_NUM_WINDSORS][MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS];
+       u_int32 uWindsorIrqTarget[MEDIAIP_MAX_NUM_WINDSORS][MEDIAIP_MAX_NUM_WINDSOR_IRQ_PINS];
+
+       u_int32 uCmdIrqPin[MEDIAIP_MAX_NUM_CMD_IRQ_PINS];
+       u_int32 uCmdIrqTarget[MEDIAIP_MAX_NUM_CMD_IRQ_PINS];
+
+       u_int32 uMsgIrqPin[MEDIAIP_MAX_NUM_MSG_IRQ_PINS];
+       u_int32 uMsgIrqTarget[MEDIAIP_MAX_NUM_MSG_IRQ_PINS];
+
+       u_int32 uSysClkFreq;
+       u_int32 uNumTimers;
+       u_int32 uTimerBaseAddr;
+       u_int32 uTimerIrqPin[MEDIAIP_MAX_NUM_TIMER_IRQ_PINS];
+       u_int32 uTimerIrqTarget[MEDIAIP_MAX_NUM_TIMER_IRQ_PINS];
+       u_int32 uTimerSlots[MEDIAIP_MAX_NUM_TIMER_IRQ_SLOTS];
+
+       u_int32 uGICBaseAddr;
+       u_int32 uUartBaseAddr;
+
+       u_int32 uDPVBaseAddr;
+       u_int32 uDPVIrqPin;
+       u_int32 uDPVIrqTarget;
+
+       u_int32 uPixIfBaseAddr;
+
+       u_int32 pal_trace_level;
+       u_int32 pal_trace_destination;
+
+       u_int32 pal_trace_level1;
+       u_int32 pal_trace_destination1;
+
+       u_int32 uHeapBase;
+       u_int32 uHeapSize;
+
+       u_int32 uFSLCacheBaseAddr[2];
+
+} MEDIAIP_FW_SYSTEM_CONFIG, *pMEDIAIP_FW_SYSTEM_CONFIG;
+
+typedef struct {
+       u_int32   uFrameID;
+       u_int32   uLumaBase;
+       u_int32   uChromaBase;
+       u_int32   uParamIdx;
+       u_int32   uKeyFrame;
+} MEDIAIP_ENC_YUV_BUFFER_DESC, *pMEDIAIP_ENC_YUV_BUFFER_DESC;
+
+typedef struct {
+       u_int32 use_ame;
+
+       u_int32 cme_mvx_max;
+       u_int32 cme_mvy_max;
+       u_int32 ame_prefresh_y0;
+       u_int32 ame_prefresh_y1;
+       u_int32 fme_min_sad;
+       u_int32 cme_min_sad;
+
+       u_int32 fme_pred_int_weight;
+       u_int32 fme_pred_hp_weight;
+       u_int32 fme_pred_qp_weight;
+       u_int32 fme_cost_weight;
+       u_int32 fme_act_thold;
+       u_int32 fme_sad_thold;
+       u_int32 fme_zero_sad_thold;
+
+       u_int32 fme_lrg_mvx_lmt;
+       u_int32 fme_lrg_mvy_lmt;
+       u_int32 fme_force_mode;
+       u_int32 fme_force4mvcost;
+       u_int32 fme_force2mvcost;
+
+       u_int32 h264_inter_thrd;
+
+       u_int32 i16x16_mode_cost;
+       u_int32 i4x4_mode_lambda;
+       u_int32 i8x8_mode_lambda;
+
+       u_int32 inter_mod_mult;
+       u_int32 inter_sel_mult;
+       u_int32 inter_bid_cost;
+       u_int32 inter_bwd_cost;
+       u_int32 inter_4mv_cost;
+       int32   one_mv_i16_cost;
+       int32   one_mv_i4x4_cost;
+       int32   one_mv_i8x8_cost;
+       int32   two_mv_i16_cost;
+       int32   two_mv_i4x4_cost;
+       int32   two_mv_i8x8_cost;
+       int32   four_mv_i16_cost;
+       int32   four_mv_i4x4_cost;
+       int32   four_mv_i8x8_cost;
+
+       u_int32 intra_pred_enab;
+       u_int32 intra_chr_pred;
+       u_int32 intra16_pred;
+       u_int32 intra4x4_pred;
+       u_int32 intra8x8_pred;
+
+       u_int32 cb_base;
+       u_int32 cb_size;
+       u_int32 cb_head_room;
+
+       u_int32 mem_page_width;
+       u_int32 mem_page_height;
+       u_int32 mem_total_size;
+       u_int32 mem_chunk_phys_addr;
+       u_int32 mem_chunk_virt_addr;
+       u_int32 mem_chunk_size;
+       u_int32 mem_y_stride;
+       u_int32 mem_uv_stride;
+
+       u_int32 split_wr_enab;
+       u_int32 split_wr_req_size;
+       u_int32 split_rd_enab;
+       u_int32 split_rd_req_size;
+
+} MEDIAIP_ENC_CALIB_PARAMS, *pMEDIAIP_ENC_CALIB_PARAMS;
+
+typedef struct {
+       u_int32 ParamChange;
+
+       u_int32 start_frame;                // These variables are for debugging purposes only
+       u_int32 end_frame;
+
+       u_int32 userdata_enable;
+       u_int32 userdata_id[4];
+       u_int32 userdata_message[MEDIAIP_ENC_USER_DATA_WORDS];
+       u_int32 userdata_length;
+
+       u_int32 h264_profile_idc;
+       u_int32 h264_level_idc;
+       u_int32 h264_au_delimiter;          // Enable the use of Access Unit Delimiters
+       u_int32 h264_seq_end_code;          // Enable the use of Sequence End Codes
+       u_int32 h264_recovery_points;       // Enable the use of Recovery Points (must be with a fixed GOP structure)
+       u_int32 h264_vui_parameters;        // Enable the use of VUI parameters (for rate control purposes)
+       u_int32 h264_aspect_ratio_present;
+       u_int32 h264_aspect_ratio_sar_width;
+       u_int32 h264_aspect_ratio_sar_height;
+       u_int32 h264_overscan_present;
+       u_int32 h264_video_type_present;
+       u_int32 h264_video_format;
+       u_int32 h264_video_full_range;
+       u_int32 h264_video_colour_descriptor;
+       u_int32 h264_video_colour_primaries;
+       u_int32 h264_video_transfer_char;
+       u_int32 h264_video_matrix_coeff;
+       u_int32 h264_chroma_loc_info_present;
+       u_int32 h264_chroma_loc_type_top;
+       u_int32 h264_chroma_loc_type_bot;
+       u_int32 h264_timing_info_present;
+       u_int32 h264_buffering_period_present;
+       u_int32 h264_low_delay_hrd_flag;
+
+       u_int32 aspect_ratio;
+       u_int32 test_mode;                  // Automated firmware test mode
+       u_int32 dsa_test_mode;              // Automated test mode for the DSA.
+       u_int32 fme_test_mode;              // Automated test mode for the fme
+
+       u_int32 cbr_row_mode;               //0: FW mode; 1: HW mode
+       u_int32 windsor_mode;               //0: normal mode; 1: intra only mode; 2: intra+0MV mode
+       u_int32 encode_mode;                // H264, VC1, MPEG2, DIVX
+       u_int32 frame_width;                // display width
+       u_int32 frame_height;               // display height
+       u_int32 enc_frame_width;            // encoding width, should be 16-pix aligned
+       u_int32 enc_frame_height;           // encoding height, should be 16-pix aligned for progressive and 32-pix aligned for interlaced
+       u_int32 frame_rate_num;
+       u_int32 frame_rate_den;
+
+       u_int32 vi_field_source;              // vi input source is frame or field
+       u_int32 vi_frame_width;
+       u_int32 vi_frame_height;
+       u_int32 crop_frame_width;
+       u_int32 crop_frame_height;
+       u_int32 crop_x_start_posn;
+       u_int32 crop_y_start_posn;
+       u_int32 mode422;
+       u_int32 mode_yuy2;
+       u_int32 dsa_luma_en;
+       u_int32 dsa_chroma_en;
+       u_int32 dsa_ext_hfilt_en;
+       u_int32 dsa_di_en;
+       u_int32 dsa_di_top_ref;
+       u_int32 dsa_vertf_disable;   // disable the vertical filter.
+       u_int32 dsa_disable_pwb;
+       u_int32 dsa_hor_phase;
+       u_int32 dsa_ver_phase;
+
+       u_int32 dsa_iac_enable;      // IAC/DSA cannot operate independently in FW, so this variable controls both
+       u_int32 iac_sc_threshold;
+       u_int32 iac_vm_threshold;
+       u_int32 iac_skip_mode;
+       u_int32 iac_grp_width;
+       u_int32 iac_grp_height;
+
+       u_int32 rate_control_mode;
+       u_int32 rate_control_resolution;
+       u_int32 buffer_size;
+       u_int32 buffer_level_init;
+       u_int32 buffer_I_bit_budget;
+
+       u_int32 top_field_first;
+
+       u_int32 intra_lum_qoffset;
+       u_int32 intra_chr_qoffset;
+       u_int32 inter_lum_qoffset;
+       u_int32 inter_chr_qoffset;
+       u_int32 use_def_scaling_mtx;
+
+       u_int32 inter_8x8_enab;
+       u_int32 inter_4x4_enab;
+
+       u_int32 fme_enable_qpel;
+       u_int32 fme_enable_hpel;
+       u_int32 fme_nozeromv;               // can force the FME not to do the (0,0) search.
+       u_int32 fme_predmv_en;
+       u_int32 fme_pred_2mv4mv;
+       u_int32 fme_smallsadthresh;
+
+       u_int32 ame_en_lmvc;
+       u_int32 ame_x_mult;
+       u_int32 cme_enable_4mv;             // Enable the use of 4MV partitioning
+       u_int32 cme_enable_1mv;
+       u_int32 hme_enable_16x8mv;
+       u_int32 hme_enable_8x16mv;
+       u_int32 cme_mv_weight;              // CME motion vector decisions are made by combining these
+       u_int32 cme_mv_cost;                // cost and weight variables
+       u_int32 ame_mult_mv;
+       u_int32 ame_shift_mv;
+
+       u_int32 hme_forceto1mv_en;
+       u_int32 hme_2mv_cost;               // the cost of choosing a 2MV mode over 1MV.
+       u_int32 hme_pred_mode;
+       u_int32 hme_sc_rnge;
+       u_int32 hme_sw_rnge;
+
+       // for windsor PES, added by fulin
+       u_int32 output_format;     // 0: output ES; 1: output PES
+       u_int32 timestamp_enab;    // 0: timestamps in all frames; 1: timestamps in I and P frames; 2: timestamps only in I frames
+       u_int32 initial_PTS_enab;  // if enabled, use the following value; otherwise computed by FW
+       u_int32 initial_PTS;       // the initial value of PTS in the first frame (ms)
+
+} MEDIAIP_ENC_CONFIG_PARAMS, *pMEDIAIP_ENC_CONFIG_PARAMS;
+
+typedef struct {
+       u_int32 ParamChange;
+
+       u_int32 gop_length;
+
+       u_int32 rate_control_bitrate;
+       u_int32 rate_control_bitrate_min;
+       u_int32 rate_control_bitrate_max;
+       u_int32 rate_control_content_models;
+       u_int32 rate_control_iframe_maxsize; // Maximum size of I frame generated by BPM in comparison to ideal (/4)
+       u_int32 rate_control_qp_init;
+       u_int32 rate_control_islice_qp;
+       u_int32 rate_control_pslice_qp;
+       u_int32 rate_control_bslice_qp;
+
+       u_int32 adaptive_quantization;      // Enable the use of activity measures from VIPP in QP assignment
+       u_int32 aq_variance;
+       u_int32 cost_optimization;          // Enable picture/frame level adjustments of the cost parameters by FW.
+       u_int32 fdlp_mode;                  // Frequency-domain low-pass filter control, 0: off, 1-4: specific, 5: adaptive
+       u_int32 enable_isegbframes;         // Enable the use of B frames in the first segment of a GOP
+       u_int32 enable_adaptive_keyratio;   // Enable the use of an adaptive I to P/B ratio (aims to reduce distortion)
+       u_int32 keyratio_imin;              // Clamps applied to picture size ratios
+       u_int32 keyratio_imax;
+       u_int32 keyratio_pmin;
+       u_int32 keyratio_pmax;
+       u_int32 keyratio_bmin;
+       u_int32 keyratio_bmax;
+       int32   keyratio_istep;
+       int32   keyratio_pstep;
+       int32   keyratio_bstep;
+
+       u_int32 enable_paff;                // Enable Picture Adaptive Frame/Field
+       u_int32 enable_b_frame_ref;         // Enable B frame as references
+       u_int32 enable_adaptive_gop;        // Enable an adaptive GOP structure
+       u_int32 enable_closed_gop;          // Enable a closed GOP structure
+                                                                         // i.e. if enabled, the first consecutive B frames following
+                                                                         // an I frame in each GOP will be intra or backwards only coded
+                                                                         // and do not rely on previous reference pictures.
+       u_int32 open_gop_refresh_freq;      // Controls the insertion of closed GOP's (or IDR GOP's in H.264)
+       u_int32 enable_adaptive_sc;         // Enable adaptive scene change GOP structure (0:off, 1:adaptive, 2:IDR)
+       u_int32 enable_fade_detection;      // Enable fade detection and associated motion estimation restrictions
+       int32   fade_detection_threshold;   // Threshold at which the activity slope indicates a possible fading event
+       u_int32 enable_repeat_b;            // Enable the repeated B frame mode at CBR
+       u_int32 enable_low_delay_b;         // Use low delay-b frames with an IPPPP style GOP
+
+} MEDIAIP_ENC_STATIC_PARAMS, *pMEDIAIP_ENC_STATIC_PARAMS;
+
+typedef struct {
+       u_int32 ParamChange;
+
+       u_int32 rows_per_slice;
+
+       u_int32 mbaff_enable;                // Macroblock adaptive frame/field enable
+       u_int32 dbf_enable;                  // Enable the deblocking filter
+
+       u_int32 field_source;                // progressive/interlaced control
+       u_int32 gop_b_length;                // Number of B frames between anchor frames
+                                                                         //  (only to be changed at a GOP segment boundary)
+       u_int32 mb_group_size;               // Number of macroblocks normally assigned to a group
+                                                                         // (implications for performance, interrupts and rate control)
+
+       u_int32 cbr_rows_per_group;
+
+       u_int32 skip_enable;                 // Enable the use of skipped macroblocks
+
+       u_int32 pts_bits_0_to_31;            // TO BE REMOVED...
+       u_int32 pts_bit_32;
+
+       u_int32 rm_expsv_cff;
+       u_int32 const_ipred;
+       int32 chr_qp_offset;
+       u_int32 intra_mb_qp_offset;
+
+       u_int32 h264_cabac_init_method;
+       u_int32 h264_cabac_init_idc;
+       u_int32 h264_cabac_enable;                 // Main and stream
+
+       int32 alpha_c0_offset_div2;
+       int32 beta_offset_div2;
+
+       u_int32 intra_prefresh_y0; // for setting intra limits for prog refresh.
+       u_int32 intra_prefresh_y1;
+
+       u_int32 dbg_dump_rec_src;
+
+} MEDIAIP_ENC_DYN_PARAMS, *pMEDIAIP_ENC_DYN_PARAMS;
+
+typedef struct {
+       MEDIAIP_ENC_CALIB_PARAMS   Calib;
+       MEDIAIP_ENC_CONFIG_PARAMS  Config;
+       MEDIAIP_ENC_STATIC_PARAMS  Static;
+       MEDIAIP_ENC_DYN_PARAMS     Dynamic;
+} MEDIAIP_ENC_EXPERT_MODE_PARAM, *pMEDIAIP_ENC_EXPERT_MODE_PARAM;
+
+typedef enum {
+       MEDIAIP_ENC_FMT_H264 = 0,
+       MEDIAIP_ENC_FMT_VC1,
+       MEDIAIP_ENC_FMT_MPEG2,
+       MEDIAIP_ENC_FMT_MPEG4SP,
+       MEDIAIP_ENC_FMT_H263,
+       MEDIAIP_ENC_FMT_MPEG1,
+       MEDIAIP_ENC_FMT_SHORT_HEADER,
+       MEDIAIP_ENC_FMT_NULL
+
+} MEDIAIP_ENC_FMT;
+
+typedef enum {
+       MEDIAIP_ENC_PROF_MPEG2_SP = 0,
+       MEDIAIP_ENC_PROF_MPEG2_MP,
+       MEDIAIP_ENC_PROF_MPEG2_HP,
+       MEDIAIP_ENC_PROF_H264_BP,
+       MEDIAIP_ENC_PROF_H264_MP,
+       MEDIAIP_ENC_PROF_H264_HP,
+       MEDIAIP_ENC_PROF_MPEG4_SP,
+       MEDIAIP_ENC_PROF_MPEG4_ASP,
+       MEDIAIP_ENC_PROF_VC1_SP,
+       MEDIAIP_ENC_PROF_VC1_MP,
+       MEDIAIP_ENC_PROF_VC1_AP
+
+} MEDIAIP_ENC_PROFILE;
+
+typedef enum {
+       MEDIAIP_ENC_BITRATECONTROLMODE_VBR          = 0x00000001,
+       MEDIAIP_ENC_BITRATECONTROLMODE_CBR          = 0x00000002,
+       MEDIAIP_ENC_BITRATECONTROLMODE_CONSTANT_QP  = 0x00000004   /* Only in debug mode */
+
+} MEDIAIP_ENC_BITRATE_MODE, *pMEDIAIP_ENC_BITRATE_MODE;
+
+typedef struct {
+       MEDIAIP_ENC_FMT           eCodecMode;
+       MEDIAIP_ENC_PROFILE       eProfile;
+       u_int32                   uLevel;
+
+       MEDIAIP_ENC_MEM_RESOURCE  tEncMemDesc;
+
+       u_int32                   uFrameRate;
+       u_int32                   uSrcStride;
+       u_int32                   uSrcWidth;
+       u_int32                   uSrcHeight;
+       u_int32                   uSrcOffset_x;
+       u_int32                   uSrcOffset_y;
+       u_int32                   uSrcCropWidth;
+       u_int32                   uSrcCropHeight;
+       u_int32                   uOutWidth;
+       u_int32                   uOutHeight;
+       u_int32                   uIFrameInterval;
+       u_int32                   uGopBLength;
+       u_int32                   uLowLatencyMode;
+
+       MEDIAIP_ENC_BITRATE_MODE  eBitRateMode;
+       u_int32                   uTargetBitrate;
+       u_int32                   uMaxBitRate;
+       u_int32                   uMinBitRate;
+       u_int32                   uInitSliceQP;
+
+} MEDIAIP_ENC_PARAM, *pMEDIAIP_ENC_PARAM;
+
+typedef struct {
+       u_int32   uFrameID;
+       u_int32   uErrorFlag;   //Error type
+       u_int32   uMBy;
+       u_int32   uMBx;
+       u_int32   uReserved[12];
+
+} ENC_ENCODING_STATUS, *pENC_ENCODING_STATUS;
+
+typedef struct {
+       u_int32   uFrameID;
+       u_int32   uDsaCyle;
+       u_int32   uMBy;
+       u_int32   uMBx;
+       u_int32   uReserved[4];
+
+} ENC_DSA_STATUS_t, *pENC_DSA_STATUS_t;
+
+typedef struct {
+       u_int32                                  pEncYUVBufferDesc;
+       u_int32                                  pEncStreamBufferDesc;
+       u_int32                                  pEncExpertModeParam;
+       u_int32                                  pEncParam;
+       u_int32                                  pEncMemPool;
+       /* Status information for master to read */
+       u_int32                                  pEncEncodingStatus;
+       u_int32                                  pEncDSAStatus;
+} MEDIA_ENC_API_CONTROL_INTERFACE, *pMEDIA_ENC_API_CONTROL_INTERFACE;
+
+typedef struct {
+       u_int32                                 FwExecBaseAddr;
+       u_int32                                 FwExecAreaSize;
+       BUFFER_DESCRIPTOR_TYPE                  StreamCmdBufferDesc;
+       BUFFER_DESCRIPTOR_TYPE                  StreamMsgBufferDesc;
+       u_int32                                 StreamCmdIntEnable[VID_API_NUM_STREAMS];
+       u_int32                                 FWVersion;
+       u_int32                                 uMVDFWOffset;
+       u_int32                                 uMaxEncoderStreams;
+       u_int32                                 pEncCtrlInterface[VID_API_NUM_STREAMS];
+       MEDIAIP_FW_SYSTEM_CONFIG                sSystemCfg;
+       u_int32                                 uApiVersion;
+       BUFFER_DESCRIPTOR_TYPE                  DebugBufferDesc;
+} ENC_RPC_HOST_IFACE, *pENC_RPC_HOST_IFACE;
+
+#define SCB_XREG_SLV_BASE                               0x00000000
+#define SCB_SCB_BLK_CTRL                                0x00070000
+#define SCB_BLK_CTRL_XMEM_RESET_SET                     0x00000090
+#define SCB_BLK_CTRL_CACHE_RESET_SET                    0x000000A0
+#define SCB_BLK_CTRL_CACHE_RESET_CLR                    0x000000A4
+#define SCB_BLK_CTRL_SCB_CLK_ENABLE_SET                 0x00000100
+
+#define XMEM_CONTROL                                    0x00041000
+
+#define DEC_MFD_XREG_SLV_BASE                           0x00180000
+
+#define MFD_HIF                                         0x0001C000
+#define MFD_HIF_MSD_REG_INTERRUPT_STATUS                0x00000018
+#define MFD_SIF                                         0x0001D000
+#define MFD_SIF_CTRL_STATUS                             0x000000F0
+#define MFD_SIF_INTR_STATUS                             0x000000F4
+#define MFD_MCX                                         0x00020800
+#define MFD_MCX_OFF                                     0x00000020
+
+#define MFD_BLK_CTRL                                    0x00030000
+#define MFD_BLK_CTRL_MFD_SYS_RESET_SET                  0x00000000
+#define MFD_BLK_CTRL_MFD_SYS_RESET_CLR                  0x00000004
+#define MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_SET           0x00000100
+#define MFD_BLK_CTRL_MFD_SYS_CLOCK_ENABLE_CLR           0x00000104
+
+#endif
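
BUFFER_DESCRIPTOR_TYPE and ENC_RPC_HOST_IFACE above define the shared-memory interface between the host and the Windsor firmware: commands, messages and encoded stream data travel through simple circular buffers bounded by start/end, each with a read and a write pointer. A minimal host-side sketch of the usual occupancy calculation, assuming conventional wrap-at-end semantics (the helper name is hypothetical, not part of this patch):

/* Bytes currently held between rptr and wptr, wrapping at 'end' back to
 * 'start'.  Illustrative only; field semantics assumed from the layout above.
 */
static u_int32 buf_desc_bytes_used(const BUFFER_DESCRIPTOR_TYPE *desc)
{
	if (desc->wptr >= desc->rptr)
		return desc->wptr - desc->rptr;

	/* writer has wrapped past 'end' back to 'start' */
	return (desc->end - desc->rptr) + (desc->wptr - desc->start);
}
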
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_b0.c b/drivers/mxc/vpu_windsor/vpu_encoder_b0.c
new file mode 100644 (file)
index 0000000..ce75954
--- /dev/null
@@ -0,0 +1,5812 @@
+/*
+ * Copyright 2018 NXP
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file vpu_encoder_b0.c
+ *
+ * copyright here may be changed later
+ *
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/videodev2.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/file.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/platform_data/dma-imx.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/pm_runtime.h>
+#include <linux/mx8_mu.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include <soc/imx8/sc/ipc.h>
+#include "vpu_encoder_b0.h"
+#include "vpu_encoder_ctrl.h"
+#include "vpu_encoder_config.h"
+#include "vpu_event_msg.h"
+#include "vpu_encoder_mem.h"
+
+#define VPU_ENC_DRIVER_VERSION         "1.0.1"
+
+struct vpu_frame_info {
+       struct list_head list;
+       MEDIAIP_ENC_PIC_INFO info;
+       u32 bytesleft;
+       u32 wptr;
+       u32 rptr;
+       u32 start;
+       u32 end;
+       bool eos;
+       bool is_start;
+       unsigned long index;
+       struct queue_data *queue;
+       s64 timestamp;
+};
+
+unsigned int vpu_dbg_level_encoder = LVL_ERR | LVL_WARN | LVL_ALL;
+static unsigned int reset_on_hang;
+static unsigned int show_detail_index = VPU_DETAIL_INDEX_DFT;
+static unsigned long debug_firmware_bitmap;
+
+#define ITEM_NAME(name)                \
+                               [name] = #name
+
+static char *cmd2str[] = {
+       ITEM_NAME(GTB_ENC_CMD_NOOP),
+       ITEM_NAME(GTB_ENC_CMD_STREAM_START),
+       ITEM_NAME(GTB_ENC_CMD_FRAME_ENCODE),
+       ITEM_NAME(GTB_ENC_CMD_FRAME_SKIP),
+       ITEM_NAME(GTB_ENC_CMD_STREAM_STOP),
+       ITEM_NAME(GTB_ENC_CMD_PARAMETER_UPD),
+       ITEM_NAME(GTB_ENC_CMD_TERMINATE),
+       ITEM_NAME(GTB_ENC_CMD_SNAPSHOT),
+       ITEM_NAME(GTB_ENC_CMD_ROLL_SNAPSHOT),
+       ITEM_NAME(GTB_ENC_CMD_LOCK_SCHEDULER),
+       ITEM_NAME(GTB_ENC_CMD_UNLOCK_SCHEDULER),
+       ITEM_NAME(GTB_ENC_CMD_CONFIGURE_CODEC),
+       ITEM_NAME(GTB_ENC_CMD_DEAD_MARK),
+       ITEM_NAME(GTB_ENC_CMD_FIRM_RESET),
+       ITEM_NAME(GTB_ENC_CMD_RESERVED)
+};
+
+static char *event2str[] = {
+       ITEM_NAME(VID_API_EVENT_UNDEFINED),
+       ITEM_NAME(VID_API_ENC_EVENT_RESET_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_START_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_STOP_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_TERMINATE_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_FRAME_INPUT_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_FRAME_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_FRAME_RELEASE),
+       ITEM_NAME(VID_API_ENC_EVENT_PARA_UPD_DONE),
+       ITEM_NAME(VID_API_ENC_EVENT_MEM_REQUEST),
+       ITEM_NAME(VID_API_ENC_EVENT_FIRMWARE_XCPT),
+       ITEM_NAME(VID_API_ENC_EVENT_RESERVED)
+};
+
+static int wait_for_start_done(struct core_device *core, int resume);
+static void wait_for_stop_done(struct vpu_ctx *ctx);
+static int sw_reset_firmware(struct core_device *core, int resume);
+static int enable_fps_sts(struct vpu_attr *attr);
+static int disable_fps_sts(struct vpu_attr *attr);
+static int configure_codec(struct vpu_ctx *ctx);
+static struct vpu_frame_info *get_idle_frame(struct queue_data *queue);
+static void put_frame_idle(struct vpu_frame_info *frame);
+static int inc_frame(struct queue_data *queue);
+static void dec_frame(struct vpu_frame_info *frame);
+static int submit_input_and_encode(struct vpu_ctx *ctx);
+static int process_stream_output(struct vpu_ctx *ctx);
+
+static char *get_event_str(u32 event)
+{
+       if (event >= VID_API_ENC_EVENT_RESERVED)
+               return "UNKNOWN EVENT";
+       return event2str[event];
+}
+
+static char *get_cmd_str(u32 cmdid)
+{
+       if (cmdid >= GTB_ENC_CMD_RESERVED)
+               return "UNKNOWN CMD";
+       return cmd2str[cmdid];
+}
+
+static void vpu_log_event(u_int32 uEvent, u_int32 ctxid)
+{
+       if (uEvent >= VID_API_ENC_EVENT_RESERVED)
+               vpu_err("receive event: 0x%X, ctx id:%d\n",
+                               uEvent, ctxid);
+       else
+               vpu_dbg(LVL_EVT, "receive event: %s, ctx id:%d\n",
+                               event2str[uEvent], ctxid);
+}
+
+static void vpu_log_cmd(u_int32 cmdid, u_int32 ctxid)
+{
+       if (cmdid >= GTB_ENC_CMD_RESERVED)
+               vpu_err("send cmd: 0x%X, ctx id:%d\n",
+                               cmdid, ctxid);
+       else
+               vpu_dbg(LVL_CMD, "send cmd: %s ctx id:%d\n",
+                               cmd2str[cmdid], ctxid);
+}
+
+static void count_event(struct vpu_ctx *ctx, u32 event)
+{
+       struct vpu_attr *attr;
+
+       WARN_ON(!ctx);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       if (event < VID_API_ENC_EVENT_RESERVED)
+               attr->statistic.event[event]++;
+       else
+               attr->statistic.event[VID_API_ENC_EVENT_RESERVED]++;
+
+       attr->statistic.current_event = event;
+       getrawmonotonic(&attr->statistic.ts_event);
+}
+
+static void count_cmd(struct vpu_attr *attr, u32 cmdid)
+{
+       WARN_ON(!attr);
+
+       if (cmdid < GTB_ENC_CMD_RESERVED)
+               attr->statistic.cmd[cmdid]++;
+       else
+               attr->statistic.cmd[GTB_ENC_CMD_RESERVED]++;
+       attr->statistic.current_cmd = cmdid;
+       getrawmonotonic(&attr->statistic.ts_cmd);
+}
+
+static void count_yuv_input(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = NULL;
+
+       WARN_ON(!ctx);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       attr->statistic.yuv_count++;
+}
+
+static void count_h264_output(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = NULL;
+
+       WARN_ON(!ctx);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       attr->statistic.h264_count++;
+}
+
+static void count_encoded_frame(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = NULL;
+
+       WARN_ON(!ctx);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       attr->statistic.encoded_count++;
+}
+
+static void count_timestamp_overwrite(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = NULL;
+
+       WARN_ON(!ctx);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       attr->statistic.timestamp_overwrite++;
+}
+
+static void write_vpu_reg(struct vpu_dev *dev, u32 val, off_t reg)
+{
+       writel(val, dev->regs_base + reg);
+}
+
+static u32 read_vpu_reg(struct vpu_dev *dev, off_t reg)
+{
+       return readl(dev->regs_base + reg);
+}
+
+/*
+ * v4l2 ioctl() operation
+ *
+ */
+static struct vpu_v4l2_fmt  formats_compressed_enc[] = {
+       {
+               .name       = "H264 Encoded Stream",
+               .fourcc     = V4L2_PIX_FMT_H264,
+               .num_planes = 1,
+               .venc_std   = VPU_VIDEO_AVC,
+               .is_yuv     = 0,
+       },
+};
+
+static struct vpu_v4l2_fmt  formats_yuv_enc[] = {
+       {
+               .name       = "4:2:0 2 Planes Y/CbCr",
+               .fourcc     = V4L2_PIX_FMT_NV12,
+               .num_planes     = 2,
+               .venc_std   = VPU_PF_YUV420_SEMIPLANAR,
+               .is_yuv     = 1,
+       },
+};
+
+static void vpu_ctx_send_cmd(struct vpu_ctx *ctx, uint32_t cmdid,
+                               uint32_t cmdnum, uint32_t *local_cmddata);
+
+static void MU_sendMesgToFW(void __iomem *base, MSG_Type type, uint32_t value)
+{
+       MU_SendMessage(base, 1, value);
+       MU_SendMessage(base, 0, type);
+}
+
+#define GET_CTX_RPC(ctx, func) \
+               func(&ctx->core_dev->shared_mem, ctx->str_index)
+
+pMEDIAIP_ENC_YUV_BUFFER_DESC get_rpc_yuv_buffer_desc(struct vpu_ctx *ctx)
+{
+       return GET_CTX_RPC(ctx, rpc_get_yuv_buffer_desc);
+}
+
+pBUFFER_DESCRIPTOR_TYPE get_rpc_stream_buffer_desc(struct vpu_ctx *ctx)
+{
+       return GET_CTX_RPC(ctx, rpc_get_stream_buffer_desc);
+}
+
+pMEDIAIP_ENC_EXPERT_MODE_PARAM get_rpc_expert_mode_param(struct vpu_ctx *ctx)
+{
+       return GET_CTX_RPC(ctx, rpc_get_expert_mode_param);
+}
+
+pMEDIAIP_ENC_PARAM get_rpc_enc_param(struct vpu_ctx *ctx)
+{
+       return GET_CTX_RPC(ctx, rpc_get_enc_param);
+}
+
+pMEDIAIP_ENC_MEM_POOL get_rpc_mem_pool(struct vpu_ctx *ctx)
+{
+       return GET_CTX_RPC(ctx, rpc_get_mem_pool);
+}
+
+pENC_ENCODING_STATUS get_rpc_encoding_status(struct vpu_ctx *ctx)
+{
+       if (!ctx || !ctx->core_dev)
+               return NULL;
+       return GET_CTX_RPC(ctx, rpc_get_encoding_status);
+}
+
+pENC_DSA_STATUS_t get_rpc_dsa_status(struct vpu_ctx *ctx)
+{
+       if (!ctx || !ctx->core_dev)
+               return NULL;
+       return GET_CTX_RPC(ctx, rpc_get_dsa_status);
+}
+
+static int vpu_enc_v4l2_ioctl_querycap(struct file *file,
+               void *fh,
+               struct v4l2_capability *cap)
+{
+       vpu_log_func();
+       strlcpy(cap->driver, "vpu encoder", sizeof(cap->driver));
+       strlcpy(cap->card, "vpu encoder", sizeof(cap->card));
+       strlcpy(cap->bus_info, "platform:", sizeof(cap->bus_info));
+       cap->version = KERNEL_VERSION(0, 0, 1);
+       cap->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+                               V4L2_CAP_STREAMING |
+                               V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+                               V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_enum_fmt_vid_cap_mplane(struct file *file,
+               void *fh,
+               struct v4l2_fmtdesc *f)
+{
+       struct vpu_v4l2_fmt *fmt;
+
+       vpu_log_func();
+       if (f->index >= ARRAY_SIZE(formats_compressed_enc))
+               return -EINVAL;
+
+       fmt = &formats_compressed_enc[f->index];
+       strlcpy(f->description, fmt->name, sizeof(f->description));
+       f->pixelformat = fmt->fourcc;
+       f->flags |= V4L2_FMT_FLAG_COMPRESSED;
+       return 0;
+}
+static int vpu_enc_v4l2_ioctl_enum_fmt_vid_out_mplane(struct file *file,
+               void *fh,
+               struct v4l2_fmtdesc *f)
+{
+       struct vpu_v4l2_fmt *fmt;
+
+       vpu_log_func();
+       if (f->index >= ARRAY_SIZE(formats_yuv_enc))
+               return -EINVAL;
+
+       fmt = &formats_yuv_enc[f->index];
+       strlcpy(f->description, fmt->name, sizeof(f->description));
+       f->pixelformat = fmt->fourcc;
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_enum_framesizes(struct file *file, void *fh,
+                                       struct v4l2_frmsizeenum *fsize)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct vpu_dev *vdev = ctx->dev;
+
+       if (!fsize)
+               return -EINVAL;
+
+       if (fsize->index)
+               return -EINVAL;
+
+       if (!vdev)
+               return -EINVAL;
+
+       vpu_log_func();
+       fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+       fsize->stepwise.max_width = vdev->supported_size.max_width;
+       fsize->stepwise.max_height = vdev->supported_size.max_height;
+       fsize->stepwise.min_width = vdev->supported_size.min_width;
+       fsize->stepwise.min_height = vdev->supported_size.min_height;
+       fsize->stepwise.step_width = vdev->supported_size.step_width;
+       fsize->stepwise.step_height = vdev->supported_size.step_height;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_enum_frameintervals(struct file *file, void *fh,
+                                               struct v4l2_frmivalenum *fival)
+{
+       u32 framerate;
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct vpu_dev *vdev = ctx->dev;
+
+
+       if (!fival)
+               return -EINVAL;
+       if (!vdev)
+               return -EINVAL;
+
+       vpu_log_func();
+       framerate = vdev->supported_fps.min +
+                       fival->index * vdev->supported_fps.step;
+       if (framerate > vdev->supported_fps.max)
+               return -EINVAL;
+
+       fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+       fival->discrete.numerator = 1;
+       fival->discrete.denominator = framerate;
+
+       return 0;
+}
+
+static struct queue_data *get_queue_by_v4l2_type(struct vpu_ctx *ctx, u32 type)
+{
+       struct queue_data *queue = NULL;
+
+       if (!ctx)
+               return NULL;
+
+       switch (type) {
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+               queue = &ctx->q_data[V4L2_SRC];
+               break;
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+               queue = &ctx->q_data[V4L2_DST];
+               break;
+       default:
+               vpu_err("unsupported v4l2 buf type: %d\n", type);
+               break;
+       }
+
+       return queue;
+}
+
+static int vpu_enc_v4l2_ioctl_g_fmt(struct file *file,
+               void *fh,
+               struct v4l2_format *f)
+{
+       struct vpu_ctx *ctx =           v4l2_fh_to_ctx(fh);
+       struct v4l2_pix_format_mplane   *pix_mp = &f->fmt.pix_mp;
+       struct queue_data *q_data;
+       unsigned int i;
+
+       q_data = get_queue_by_v4l2_type(ctx, f->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s\n", __func__, q_data->desc);
+
+       if (!q_data->current_fmt) {
+               vpu_err("%s's current fmt is NULL\n", q_data->desc);
+               return -EINVAL;
+       }
+
+       pix_mp->pixelformat = q_data->current_fmt->fourcc;
+       pix_mp->num_planes = q_data->current_fmt->num_planes;
+       pix_mp->width = q_data->width;
+       pix_mp->height = q_data->height;
+       pix_mp->field = V4L2_FIELD_ANY;
+       for (i = 0; i < pix_mp->num_planes; i++)
+               pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+
+       if (V4L2_TYPE_IS_OUTPUT(f->type))
+               pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+       else
+               pix_mp->plane_fmt[0].bytesperline = q_data->width;
+
+       return 0;
+}
+
+u32 cpu_phy_to_mu(struct core_device *dev, u32 addr)
+{
+       return addr - dev->m0_p_fw_space_phy;
+}
+
+static int initialize_enc_param(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM param = &attr->param;
+
+       mutex_lock(&ctx->instance_mutex);
+
+       param->eCodecMode = MEDIAIP_ENC_FMT_H264;
+       param->tEncMemDesc.uMemPhysAddr = 0;
+       param->tEncMemDesc.uMemVirtAddr = 0;
+       param->tEncMemDesc.uMemSize     = 0;
+       param->uSrcStride = VPU_ENC_WIDTH_DEFAULT;
+       param->uSrcWidth = VPU_ENC_WIDTH_DEFAULT;
+       param->uSrcHeight = VPU_ENC_HEIGHT_DEFAULT;
+       param->uSrcOffset_x = 0;
+       param->uSrcOffset_y = 0;
+       param->uSrcCropWidth = VPU_ENC_WIDTH_DEFAULT;
+       param->uSrcCropHeight = VPU_ENC_HEIGHT_DEFAULT;
+       param->uOutWidth = VPU_ENC_WIDTH_DEFAULT;
+       param->uOutHeight = VPU_ENC_HEIGHT_DEFAULT;
+       param->uFrameRate = VPU_ENC_FRAMERATE_DEFAULT;
+       param->uMinBitRate = BITRATE_LOW_THRESHOLD;
+
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int check_stepwise(u32 val, u32 min, u32 max, u32 step)
+{
+       if (val < min)
+               return -EINVAL;
+       if (val > max)
+               return -EINVAL;
+       if ((val - min) % step)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int check_size(struct vpu_dev *vdev, u32 width, u32 height)
+{
+       int ret;
+
+       if (!vdev)
+               return -EINVAL;
+
+       ret = check_stepwise(width,
+                       vdev->supported_size.min_width,
+                       vdev->supported_size.max_width,
+                       vdev->supported_size.step_width);
+       if (ret) {
+               vpu_err("Unsupported frame size : %dx%d\n", width, height);
+               return -EINVAL;
+       }
+       ret = check_stepwise(height,
+                       vdev->supported_size.min_height,
+                       vdev->supported_size.max_height,
+                       vdev->supported_size.step_height);
+       if (ret) {
+               vpu_err("Unsupported frame size : %dx%d\n", width, height);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int valid_crop_info(struct queue_data *queue, struct v4l2_rect *rect)
+{
+       struct vpu_ctx *ctx;
+       u32 MIN_WIDTH;
+       u32 MIN_HEIGHT;
+
+       if (!queue || !rect || !queue->ctx)
+               return -EINVAL;
+
+       ctx = queue->ctx;
+       MIN_WIDTH = ctx->dev->supported_size.min_width;
+       MIN_HEIGHT = ctx->dev->supported_size.min_height;
+
+       if (rect->left > queue->width - MIN_WIDTH ||
+               rect->top > queue->height - MIN_HEIGHT) {
+               rect->left = 0;
+               rect->top = 0;
+               rect->width = queue->width;
+               rect->height = queue->height;
+               return 0;
+       }
+
+       rect->width = min(rect->width, queue->width - rect->left);
+       if (rect->width)
+               rect->width = max_t(u32, rect->width, MIN_WIDTH);
+       else
+               rect->width = queue->width;
+       rect->height = min(rect->height, queue->height - rect->top);
+       if (rect->height)
+               rect->height = max_t(u32, rect->height, MIN_HEIGHT);
+       else
+               rect->height = queue->height;
+
+       return 0;
+}
+
+static int check_v4l2_fmt(struct vpu_dev *dev, struct v4l2_format *f)
+{
+       int ret = -EINVAL;
+
+       switch (f->type) {
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+               ret = check_size(dev, f->fmt.pix.width, f->fmt.pix.height);
+               break;
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+       case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+               ret = check_size(dev, f->fmt.pix_mp.width,
+                                       f->fmt.pix_mp.height);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static struct vpu_v4l2_fmt *find_fmt_by_fourcc(struct vpu_v4l2_fmt *fmts,
+                                               unsigned int size,
+                                               u32 fourcc)
+{
+       unsigned int i;
+
+       if (!fmts || !size)
+               return NULL;
+
+       for (i = 0; i < size; i++) {
+               if (fmts[i].fourcc == fourcc)
+                       return &fmts[i];
+       }
+
+       return NULL;
+}
+
+static char *cvrt_fourcc_to_str(u32 pixelformat)
+{
+       static char str[5];
+
+       str[0] = pixelformat & 0xff;
+       str[1] = (pixelformat >> 8) & 0xff;
+       str[2] = (pixelformat >> 16) & 0xff;
+       str[3] = (pixelformat >> 24) & 0xff;
+       str[4] = '\0';
+
+       return str;
+}
+
+static int set_yuv_queue_fmt(struct queue_data *q_data, struct v4l2_format *f)
+{
+       struct vpu_v4l2_fmt *fmt = NULL;
+       struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+       int i;
+
+       if (!q_data || !f)
+               return -EINVAL;
+
+       fmt = find_fmt_by_fourcc(q_data->supported_fmts, q_data->fmt_count,
+                               pix_mp->pixelformat);
+       if (!fmt) {
+               vpu_err("unsupported yuv fmt: %s\n",
+                               cvrt_fourcc_to_str(pix_mp->pixelformat));
+               return -EINVAL;
+       }
+
+       q_data->width = pix_mp->width;
+       q_data->height = pix_mp->height;
+       q_data->rect.left = 0;
+       q_data->rect.top = 0;
+       q_data->rect.width = pix_mp->width;
+       q_data->rect.height = pix_mp->height;
+       q_data->sizeimage[0] = pix_mp->width * pix_mp->height;
+       q_data->sizeimage[1] = pix_mp->width * pix_mp->height / 2;
+       pix_mp->num_planes = fmt->num_planes;
+       for (i = 0; i < pix_mp->num_planes; i++)
+               pix_mp->plane_fmt[i].sizeimage = q_data->sizeimage[i];
+
+       q_data->current_fmt = fmt;
+
+       return 0;
+}
+
+static u32 get_enc_minimum_sizeimage(u32 width, u32 height)
+{
+       const u32 THRESHOLD = 256 * 1024;
+       u32 sizeimage;
+
+       sizeimage = width * height / 2;
+       if (sizeimage < THRESHOLD)
+               sizeimage = THRESHOLD;
+
+       return sizeimage;
+}
+
+static int set_enc_queue_fmt(struct queue_data *q_data, struct v4l2_format *f)
+{
+       struct vpu_v4l2_fmt *fmt = NULL;
+       struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+       u32 sizeimage;
+
+       if (!q_data || !f)
+               return -EINVAL;
+
+       fmt = find_fmt_by_fourcc(q_data->supported_fmts, q_data->fmt_count,
+                               pix_mp->pixelformat);
+       if (!fmt) {
+               vpu_err("unsupported encode fmt: %s\n",
+                               cvrt_fourcc_to_str(pix_mp->pixelformat));
+               return -EINVAL;
+       }
+
+       q_data->width = pix_mp->width;
+       q_data->height = pix_mp->height;
+       sizeimage = get_enc_minimum_sizeimage(pix_mp->width, pix_mp->height);
+       q_data->sizeimage[0] = max(sizeimage, pix_mp->plane_fmt[0].sizeimage);
+       pix_mp->plane_fmt[0].sizeimage = q_data->sizeimage[0];
+
+       q_data->current_fmt = fmt;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_s_fmt(struct file *file,
+               void *fh,
+               struct v4l2_format *f)
+{
+       struct vpu_ctx                  *ctx = v4l2_fh_to_ctx(fh);
+       int                             ret = 0;
+       struct queue_data               *q_data;
+       pMEDIAIP_ENC_PARAM  pEncParam;
+       struct vpu_attr *attr;
+
+       attr = get_vpu_ctx_attr(ctx);
+       pEncParam = &attr->param;
+       q_data = get_queue_by_v4l2_type(ctx, f->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       ret = check_v4l2_fmt(ctx->dev, f);
+       if (ret)
+               return ret;
+
+       mutex_lock(&ctx->instance_mutex);
+       if (V4L2_TYPE_IS_OUTPUT(f->type))
+               ret = set_yuv_queue_fmt(q_data, f);
+       else
+               ret = set_enc_queue_fmt(q_data, f);
+       mutex_unlock(&ctx->instance_mutex);
+
+       return ret;
+}
+
+static int vpu_enc_v4l2_ioctl_g_parm(struct file *file, void *fh,
+                               struct v4l2_streamparm *parm)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct vpu_attr *attr = NULL;
+       pMEDIAIP_ENC_PARAM param = NULL;
+
+       if (!parm || !ctx)
+               return -EINVAL;
+
+       attr = get_vpu_ctx_attr(ctx);
+       param = &attr->param;
+
+       vpu_log_func();
+       parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+       parm->parm.capture.capturemode = 0;
+       parm->parm.capture.timeperframe.numerator = 1;
+       parm->parm.capture.timeperframe.denominator = param->uFrameRate;
+       parm->parm.capture.readbuffers = 0;
+
+       return 0;
+}
+
+static int find_proper_framerate(struct vpu_dev *dev, struct v4l2_fract *fival)
+{
+       u32 min_delta = INT_MAX;
+       struct v4l2_fract target_fival = {0, 0};
+       u32 framerate;
+
+       if (!fival || !dev)
+               return -EINVAL;
+
+       framerate = dev->supported_fps.min;
+
+       while (framerate <= dev->supported_fps.max) {
+               u32 delta;
+
+               delta = abs(fival->numerator * framerate -
+                               fival->denominator);
+               if (!delta)
+                       return 0;
+               if (delta < min_delta) {
+                       target_fival.numerator = 1;
+                       target_fival.denominator = framerate;
+                       min_delta = delta;
+               }
+
+               framerate += dev->supported_fps.step;
+       }
+       if (!target_fival.numerator || !target_fival.denominator)
+               return -EINVAL;
+
+       fival->numerator = target_fival.numerator;
+       fival->denominator = target_fival.denominator;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_s_parm(struct file *file, void *fh,
+                               struct v4l2_streamparm *parm)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct vpu_attr *attr = NULL;
+       struct v4l2_fract fival;
+       int ret;
+
+       if (!parm || !ctx)
+               return -EINVAL;
+
+       vpu_log_func();
+       attr = get_vpu_ctx_attr(ctx);
+
+       fival.numerator = parm->parm.capture.timeperframe.numerator;
+       fival.denominator = parm->parm.capture.timeperframe.denominator;
+       if (!fival.numerator || !fival.denominator)
+               return -EINVAL;
+
+       ret = find_proper_framerate(ctx->dev, &fival);
+       if (ret) {
+               vpu_err("Unsupported FPS : %d / %d\n",
+                               fival.numerator, fival.denominator);
+               return ret;
+       }
+
+       mutex_lock(&ctx->instance_mutex);
+       attr->param.uFrameRate = fival.denominator / fival.numerator;
+       mutex_unlock(&ctx->instance_mutex);
+
+       parm->parm.capture.timeperframe.numerator = fival.numerator;
+       parm->parm.capture.timeperframe.denominator = fival.denominator;
+
+       return 0;
+}
+
+static int vpu_enc_queue_expbuf(struct queue_data *queue,
+                               struct v4l2_exportbuffer *buf)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_expbuf(&queue->vb2_q, buf);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_reqbufs(struct queue_data *queue,
+                               struct v4l2_requestbuffers *reqbuf)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_reqbufs(&queue->vb2_q, reqbuf);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_querybuf(struct queue_data *queue,
+                               struct v4l2_buffer *buf)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_querybuf(&queue->vb2_q, buf);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_qbuf(struct queue_data *queue,
+                               struct v4l2_buffer *buf)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_qbuf(&queue->vb2_q, buf);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_dqbuf(struct queue_data *queue,
+                               struct v4l2_buffer *buf, bool nonblocking)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_dqbuf(&queue->vb2_q, buf, nonblocking);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_enable(struct queue_data *queue,
+                               enum v4l2_buf_type type)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_streamon(&queue->vb2_q, type);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static void clear_queue(struct queue_data *queue)
+{
+       struct vpu_frame_info *frame;
+       struct vpu_frame_info *tmp;
+       struct vb2_data_req *p_data_req;
+       struct vb2_data_req *p_temp;
+       struct vb2_buffer *vb;
+
+       if (!queue)
+               return;
+
+       list_for_each_entry_safe(frame, tmp, &queue->frame_q, list) {
+               vpu_dbg(LVL_INFO, "drop frame\n");
+               put_frame_idle(frame);
+       }
+
+       list_for_each_entry_safe(frame, tmp, &queue->frame_idle, list)
+               dec_frame(frame);
+
+       list_for_each_entry_safe(p_data_req, p_temp, &queue->drv_q, list) {
+               vpu_dbg(LVL_DEBUG, "%s(%d) - list_del(%p)\n", __func__,
+                               p_data_req->sequence, p_data_req);
+               list_del(&p_data_req->list);
+       }
+       list_for_each_entry(vb, &queue->vb2_q.queued_list, queued_entry) {
+               if (vb->state == VB2_BUF_STATE_ACTIVE)
+                       vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+       }
+
+       INIT_LIST_HEAD(&queue->drv_q);
+       INIT_LIST_HEAD(&queue->frame_q);
+       INIT_LIST_HEAD(&queue->frame_idle);
+}
+
+static int vpu_enc_queue_disable(struct queue_data *queue,
+                               enum v4l2_buf_type type)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_streamoff(&queue->vb2_q, type);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_release(struct queue_data *queue)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited) {
+               clear_queue(queue);
+               vb2_queue_release(&queue->vb2_q);
+               ret = 0;
+       }
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_queue_mmap(struct queue_data *queue,
+                               struct vm_area_struct *vma)
+{
+       int ret = -EINVAL;
+
+       down(&queue->drv_q_lock);
+       if (queue->vb2_q_inited)
+               ret = vb2_mmap(&queue->vb2_q, vma);
+       up(&queue->drv_q_lock);
+
+       return ret;
+}
+
+static int vpu_enc_v4l2_ioctl_expbuf(struct file *file,
+               void *fh,
+               struct v4l2_exportbuffer *buf)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+
+       q_data = get_queue_by_v4l2_type(ctx, buf->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s\n", __func__, q_data->desc);
+
+       return vpu_enc_queue_expbuf(q_data, buf);
+}
+
+static int vpu_enc_v4l2_ioctl_subscribe_event(struct v4l2_fh *fh,
+               const struct v4l2_event_subscription *sub
+               )
+{
+       vpu_log_func();
+
+       switch (sub->type) {
+       case V4L2_EVENT_EOS:
+               return v4l2_event_subscribe(fh, sub, 0, NULL);
+       case V4L2_EVENT_SOURCE_CHANGE:
+               return v4l2_src_change_event_subscribe(fh, sub);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int vpu_enc_v4l2_ioctl_reqbufs(struct file *file,
+               void *fh,
+               struct v4l2_requestbuffers *reqbuf)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, reqbuf->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       ret = vpu_enc_queue_reqbufs(q_data, reqbuf);
+
+       return ret;
+}
+
+static int vpu_enc_v4l2_ioctl_querybuf(struct file *file,
+               void *fh,
+               struct v4l2_buffer *buf)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+       unsigned int i;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, buf->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       ret = vpu_enc_queue_querybuf(q_data, buf);
+       if (ret)
+               return ret;
+
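+       /*
+        * Encode the queue type into the high bits of the mmap offset so
+        * that mmap() can later recover which vb2 queue (OUTPUT or CAPTURE)
+        * the buffer belongs to.
+        */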
+       if (buf->memory == V4L2_MEMORY_MMAP) {
+               if (V4L2_TYPE_IS_MULTIPLANAR(buf->type)) {
+                       for (i = 0; i < buf->length; i++)
+                               buf->m.planes[i].m.mem_offset |= (q_data->type << MMAP_BUF_TYPE_SHIFT);
+               } else
+                       buf->m.offset |= (q_data->type << MMAP_BUF_TYPE_SHIFT);
+       }
+
+       return ret;
+}
+
+static struct vb2_buffer *cvrt_v4l2_to_vb2_buffer(struct vb2_queue *vq,
+                                               struct v4l2_buffer *buf)
+{
+       if (!vq || !buf)
+               return NULL;
+
+       if (buf->index >= vq->num_buffers)
+               return NULL;
+
+       return vq->bufs[buf->index];
+}
+
+static u32 get_v4l2_plane_payload(struct v4l2_plane *plane)
+{
+       return plane->bytesused - plane->data_offset;
+}
+
+static void set_v4l2_plane_payload(struct v4l2_plane *plane, u32 size)
+{
+       plane->bytesused = plane->data_offset + size;
+}
+
+static int is_valid_output_mplane_buf(struct queue_data *q_data,
+                                       struct vpu_v4l2_fmt *fmt,
+                                       struct v4l2_buffer *buf)
+{
+       int i;
+
+       for (i = 0; i < fmt->num_planes; i++) {
+               u32 bytesused = get_v4l2_plane_payload(&buf->m.planes[i]);
+
+               if (!bytesused) {
+                       set_v4l2_plane_payload(&buf->m.planes[i],
+                                               q_data->sizeimage[i]);
+                       continue;
+               }
+               if (fmt->is_yuv && bytesused != q_data->sizeimage[i])
+                       return 0;
+       }
+
+       return 1;
+}
+
+static int is_valid_output_buf(struct queue_data *q_data,
+                               struct vpu_v4l2_fmt *fmt,
+                               struct v4l2_buffer *buf)
+{
+       if (!buf->bytesused) {
+               buf->bytesused = q_data->sizeimage[0];
+               return 1;
+       }
+       if (fmt->is_yuv && buf->bytesused != q_data->sizeimage[0])
+               return 0;
+
+       return 1;
+}
+
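+/*
+ * Sanity-check a buffer before queuing: it must be in the dequeued state,
+ * and for raw (YUV) OUTPUT buffers the payload must match the negotiated
+ * sizeimage; an empty payload is filled in with sizeimage.
+ */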
+static int precheck_qbuf(struct queue_data *q_data, struct v4l2_buffer *buf)
+{
+       struct vb2_buffer *vb = NULL;
+       struct vpu_v4l2_fmt *fmt;
+       int ret;
+
+       if (!q_data || !buf)
+               return -EINVAL;
+
+       if (!q_data->current_fmt)
+               return -EINVAL;
+
+       vb = cvrt_v4l2_to_vb2_buffer(&q_data->vb2_q, buf);
+       if (!vb) {
+               vpu_err("invalid v4l2 buffer index:%d\n", buf->index);
+               return -EINVAL;
+       }
+       if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+               vpu_err("invalid buffer state:%d\n", vb->state);
+               return -EINVAL;
+       }
+
+       if (!V4L2_TYPE_IS_OUTPUT(buf->type))
+               return 0;
+
+       fmt = q_data->current_fmt;
+       if (V4L2_TYPE_IS_MULTIPLANAR(buf->type))
+               ret = is_valid_output_mplane_buf(q_data, fmt, buf);
+       else
+               ret = is_valid_output_buf(q_data, fmt, buf);
+       if (!ret)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_qbuf(struct file *file,
+               void *fh,
+               struct v4l2_buffer *buf)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, buf->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       ret = precheck_qbuf(q_data, buf);
+       if (ret < 0)
+               return ret;
+
+       ret = vpu_enc_queue_qbuf(q_data, buf);
+       if (ret)
+               return ret;
+
+       if (V4L2_TYPE_IS_OUTPUT(buf->type)) {
+               mutex_lock(&ctx->dev->dev_mutex);
+               mutex_lock(&ctx->instance_mutex);
+               if (!test_bit(VPU_ENC_STATUS_CONFIGURED, &ctx->status))
+                       set_bit(VPU_ENC_STATUS_DATA_READY, &ctx->status);
+               configure_codec(ctx);
+               mutex_unlock(&ctx->instance_mutex);
+               mutex_unlock(&ctx->dev->dev_mutex);
+
+               submit_input_and_encode(ctx);
+               count_yuv_input(ctx);
+       } else {
+               process_stream_output(ctx);
+       }
+
+       return ret;
+}
+
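+/*
+ * EOS handling: if the stream was never started, signal V4L2_EVENT_EOS to
+ * user space right away; otherwise send GTB_ENC_CMD_STREAM_STOP once and
+ * let the STOP_DONE event deliver the last buffer and the EOS event.
+ */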
+static void notify_eos(struct vpu_ctx *ctx)
+{
+       const struct v4l2_event ev = {
+               .type = V4L2_EVENT_EOS
+       };
+
+       mutex_lock(&ctx->instance_mutex);
+       if (!test_bit(VPU_ENC_STATUS_CLOSED, &ctx->status) &&
+               !test_and_set_bit(VPU_ENC_STATUS_EOS_SEND, &ctx->status))
+               v4l2_event_queue_fh(&ctx->fh, &ev);
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+static int send_eos(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return -EINVAL;
+
+       if (!test_bit(VPU_ENC_STATUS_START_SEND, &ctx->status)) {
+               notify_eos(ctx);
+               return 0;
+       }
+
+       if (!test_and_set_bit(VPU_ENC_STATUS_STOP_SEND, &ctx->status)) {
+               vpu_dbg(LVL_INFO, "stop stream\n");
+               vpu_ctx_send_cmd(ctx, GTB_ENC_CMD_STREAM_STOP, 0, NULL);
+       }
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_dqbuf(struct file *file,
+               void *fh,
+               struct v4l2_buffer *buf)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, buf->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       ret = vpu_enc_queue_dqbuf(q_data, buf, file->f_flags & O_NONBLOCK);
+
+       if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+               if (!ret)
+                       count_h264_output(ctx);
+               buf->flags = q_data->vb2_reqs[buf->index].buffer_flags;
+       }
+
+       return ret;
+}
+
+static bool format_is_support(struct vpu_v4l2_fmt *format_table,
+               unsigned int table_size,
+               struct v4l2_format *f)
+{
+       unsigned int i;
+
+       for (i = 0; i < table_size; i++) {
+               if (format_table[i].fourcc == f->fmt.pix_mp.pixelformat)
+                       return true;
+       }
+       return false;
+}
+
+static int vpu_enc_v4l2_ioctl_try_fmt(struct file *file,
+               void *fh,
+               struct v4l2_format *f)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct v4l2_pix_format_mplane   *pix_mp = &f->fmt.pix_mp;
+       struct queue_data *q_data;
+
+       q_data = get_queue_by_v4l2_type(ctx, f->type);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s\n", __func__, q_data->desc);
+
+       if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+               pix_mp->field = V4L2_FIELD_ANY;
+               pix_mp->colorspace = V4L2_COLORSPACE_REC709;
+       }
+       if (!format_is_support(q_data->supported_fmts, q_data->fmt_count, f))
+               return -EINVAL;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_g_crop(struct file *file, void *fh,
+                               struct v4l2_crop *cr)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *src = &ctx->q_data[V4L2_SRC];
+
+       if (!cr)
+               return -EINVAL;
+
+       if (get_queue_by_v4l2_type(ctx, cr->type) != src)
+               return -EINVAL;
+
+       vpu_log_func();
+       cr->c.left = src->rect.left;
+       cr->c.top = src->rect.top;
+       cr->c.width = src->rect.width;
+       cr->c.height = src->rect.height;
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_s_crop(struct file *file, void *fh,
+                               const struct v4l2_crop *cr)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *src = &ctx->q_data[V4L2_SRC];
+       struct vpu_dev *dev = ctx->dev;
+
+       if (!cr)
+               return -EINVAL;
+       if (!dev)
+               return -EINVAL;
+
+       if (get_queue_by_v4l2_type(ctx, cr->type) != src)
+               return -EINVAL;
+
+       vpu_log_func();
+       src->rect.left = ALIGN(cr->c.left, dev->supported_size.step_width);
+       src->rect.top = ALIGN(cr->c.top, dev->supported_size.step_height);
+       src->rect.width = ALIGN(cr->c.width, dev->supported_size.step_width);
+       src->rect.height = ALIGN(cr->c.height, dev->supported_size.step_height);
+       valid_crop_info(src, &src->rect);
+
+       return 0;
+}
+
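+/*
+ * A stop request is only turned into an EOS command once every queued
+ * OUTPUT buffer has been consumed and the firmware is writable again, so
+ * frames queued before the stop still get encoded.
+ */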
+static int response_stop_stream(struct vpu_ctx *ctx)
+{
+       struct queue_data *queue;
+
+       if (!ctx)
+               return -EINVAL;
+
+       queue = &ctx->q_data[V4L2_SRC];
+
+       down(&queue->drv_q_lock);
+       if (!list_empty(&queue->drv_q))
+               goto exit;
+
+       if (!test_bit(VPU_ENC_FLAG_WRITEABLE, &queue->rw_flag))
+               goto exit;
+       if (test_and_clear_bit(VPU_ENC_STATUS_STOP_REQ, &ctx->status))
+               send_eos(ctx);
+exit:
+       up(&queue->drv_q_lock);
+
+       return 0;
+}
+
+static int request_eos(struct vpu_ctx *ctx)
+{
+       WARN_ON(!ctx);
+
+       set_bit(VPU_ENC_STATUS_STOP_REQ, &ctx->status);
+       response_stop_stream(ctx);
+
+       return 0;
+}
+
+static int set_core_force_release(struct core_device *core)
+{
+       int i;
+
+       if (!core)
+               return -EINVAL;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (!core->ctx[i])
+                       continue;
+               set_bit(VPU_ENC_STATUS_FORCE_RELEASE, &core->ctx[i]->status);
+       }
+
+       return 0;
+}
+
+static void clear_start_status(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       clear_bit(VPU_ENC_STATUS_CONFIGURED, &ctx->status);
+       clear_bit(VPU_ENC_STATUS_START_SEND, &ctx->status);
+       clear_bit(VPU_ENC_STATUS_START_DONE, &ctx->status);
+}
+
+static void clear_stop_status(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       clear_bit(VPU_ENC_STATUS_STOP_REQ, &ctx->status);
+       clear_bit(VPU_ENC_STATUS_STOP_SEND, &ctx->status);
+       clear_bit(VPU_ENC_STATUS_STOP_DONE, &ctx->status);
+       clear_bit(VPU_ENC_STATUS_EOS_SEND, &ctx->status);
+}
+
+static void reset_core_on_hang(struct core_device *core)
+{
+       int ret;
+       int i;
+
+       for (i = 0; i < core->supported_instance_count; i++)
+               clear_start_status(core->ctx[i]);
+
+       ret = sw_reset_firmware(core, 1);
+       if (ret)
+               vpu_err("failed to reset core[%d] on hang\n", core->id);
+}
+
+static int set_core_hang(struct core_device *core)
+{
+       core->hang = true;
+
+       if (reset_on_hang)
+               reset_core_on_hang(core);
+
+       return 0;
+}
+
+static void clear_core_hang(struct core_device *core)
+{
+       if (!core)
+               return;
+
+       core->hang = false;
+}
+
+static void wait_for_stop_done(struct vpu_ctx *ctx)
+{
+       int ret;
+
+       WARN_ON(!ctx);
+
+       if (!test_bit(VPU_ENC_STATUS_START_SEND, &ctx->status))
+               return;
+       if (test_bit(VPU_ENC_STATUS_STOP_DONE, &ctx->status))
+               return;
+
+       ret = wait_for_completion_timeout(&ctx->stop_cmp,
+                                               msecs_to_jiffies(500));
+       if (!ret)
+               vpu_err("wait for stop done timeout\n");
+}
+
+static int vpu_enc_v4l2_ioctl_encoder_cmd(struct file *file,
+               void *fh,
+               struct v4l2_encoder_cmd *cmd
+               )
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+
+       vpu_dbg(LVL_FUNC, "%s(), cmd = %d, (%d, %d)\n", __func__, cmd->cmd,
+                       ctx->core_dev->id, ctx->str_index);
+       switch (cmd->cmd) {
+       case V4L2_ENC_CMD_START:
+               break;
+       case V4L2_ENC_CMD_STOP:
+               request_eos(ctx);
+               break;
+       case V4L2_ENC_CMD_PAUSE:
+               break;
+       case V4L2_ENC_CMD_RESUME:
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_streamon(struct file *file,
+               void *fh,
+               enum v4l2_buf_type i
+               )
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct vpu_attr *attr;
+       struct queue_data *q_data;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, i);
+       if (!q_data)
+               return -EINVAL;
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (attr) {
+               attr->ts_start[V4L2_SRC] = 0;
+               attr->ts_start[V4L2_DST] = 0;
+       }
+
+       ret = vpu_enc_queue_enable(q_data, i);
+       if (ret)
+               return ret;
+
+       if (V4L2_TYPE_IS_OUTPUT(i)) {
+               mutex_lock(&ctx->dev->dev_mutex);
+               mutex_lock(&ctx->instance_mutex);
+               set_bit(VPU_ENC_STATUS_OUTPUT_READY, &ctx->status);
+               configure_codec(ctx);
+               mutex_unlock(&ctx->instance_mutex);
+               mutex_unlock(&ctx->dev->dev_mutex);
+       }
+
+       return 0;
+}
+
+static int vpu_enc_v4l2_ioctl_streamoff(struct file *file,
+               void *fh,
+               enum v4l2_buf_type i)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(fh);
+       struct queue_data *q_data;
+       int ret;
+
+       q_data = get_queue_by_v4l2_type(ctx, i);
+       if (!q_data)
+               return -EINVAL;
+
+       vpu_dbg(LVL_FUNC, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       request_eos(ctx);
+       wait_for_stop_done(ctx);
+
+       ret = vpu_enc_queue_disable(q_data, i);
+
+       return ret;
+}
+
+static const struct v4l2_ioctl_ops vpu_enc_v4l2_ioctl_ops = {
+       .vidioc_querycap                = vpu_enc_v4l2_ioctl_querycap,
+       .vidioc_enum_fmt_vid_cap_mplane = vpu_enc_v4l2_ioctl_enum_fmt_vid_cap_mplane,
+       .vidioc_enum_fmt_vid_out_mplane = vpu_enc_v4l2_ioctl_enum_fmt_vid_out_mplane,
+       .vidioc_enum_framesizes         = vpu_enc_v4l2_ioctl_enum_framesizes,
+       .vidioc_enum_frameintervals     = vpu_enc_v4l2_ioctl_enum_frameintervals,
+       .vidioc_g_fmt_vid_cap_mplane    = vpu_enc_v4l2_ioctl_g_fmt,
+       .vidioc_g_fmt_vid_out_mplane    = vpu_enc_v4l2_ioctl_g_fmt,
+       .vidioc_try_fmt_vid_cap_mplane  = vpu_enc_v4l2_ioctl_try_fmt,
+       .vidioc_try_fmt_vid_out_mplane  = vpu_enc_v4l2_ioctl_try_fmt,
+       .vidioc_s_fmt_vid_cap_mplane    = vpu_enc_v4l2_ioctl_s_fmt,
+       .vidioc_s_fmt_vid_out_mplane    = vpu_enc_v4l2_ioctl_s_fmt,
+       .vidioc_g_parm                  = vpu_enc_v4l2_ioctl_g_parm,
+       .vidioc_s_parm                  = vpu_enc_v4l2_ioctl_s_parm,
+       .vidioc_expbuf                  = vpu_enc_v4l2_ioctl_expbuf,
+       .vidioc_g_crop                  = vpu_enc_v4l2_ioctl_g_crop,
+       .vidioc_s_crop                  = vpu_enc_v4l2_ioctl_s_crop,
+       .vidioc_encoder_cmd             = vpu_enc_v4l2_ioctl_encoder_cmd,
+       .vidioc_subscribe_event         = vpu_enc_v4l2_ioctl_subscribe_event,
+       .vidioc_unsubscribe_event       = v4l2_event_unsubscribe,
+       .vidioc_reqbufs                 = vpu_enc_v4l2_ioctl_reqbufs,
+       .vidioc_querybuf                = vpu_enc_v4l2_ioctl_querybuf,
+       .vidioc_qbuf                    = vpu_enc_v4l2_ioctl_qbuf,
+       .vidioc_dqbuf                   = vpu_enc_v4l2_ioctl_dqbuf,
+       .vidioc_streamon                = vpu_enc_v4l2_ioctl_streamon,
+       .vidioc_streamoff               = vpu_enc_v4l2_ioctl_streamoff,
+};
+
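+/*
+ * Commands are written into the shared RPC buffer and then signalled to the
+ * firmware through the messaging unit (MU) doorbell.  cmd_mutex serialises
+ * senders, and the memory barrier makes sure the command data is visible
+ * before the doorbell rings.
+ */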
+static void vpu_core_send_cmd(struct core_device *core, u32 idx,
+                               u32 cmdid, u32 cmdnum, u32 *local_cmddata)
+{
+       WARN_ON(!core || idx >= VID_API_NUM_STREAMS);
+
+       vpu_log_cmd(cmdid, idx);
+       count_cmd(&core->attr[idx], cmdid);
+
+       mutex_lock(&core->cmd_mutex);
+       rpc_send_cmd_buf_encoder(&core->shared_mem, idx,
+                               cmdid, cmdnum, local_cmddata);
+       mb();
+       MU_SendMessage(core->mu_base_virtaddr, 0, COMMAND);
+       mutex_unlock(&core->cmd_mutex);
+}
+
+static void vpu_ctx_send_cmd(struct vpu_ctx *ctx, uint32_t cmdid,
+                               uint32_t cmdnum, uint32_t *local_cmddata)
+{
+       vpu_core_send_cmd(ctx->core_dev, ctx->str_index,
+                               cmdid, cmdnum, local_cmddata);
+}
+
+static void set_core_fw_status(struct core_device *core, bool status)
+{
+       core->fw_is_ready = status;
+}
+
+static int reset_vpu_core_dev(struct core_device *core_dev)
+{
+       if (!core_dev)
+               return -EINVAL;
+
+       set_core_fw_status(core_dev, false);
+       core_dev->firmware_started = false;
+
+       return 0;
+}
+
+static int sw_reset_firmware(struct core_device *core, int resume)
+{
+       int ret = 0;
+
+       WARN_ON(!core);
+
+       vpu_dbg(LVL_INFO, "core[%d] sw reset firmware\n", core->id);
+
+       init_completion(&core->start_cmp);
+       vpu_core_send_cmd(core, 0, GTB_ENC_CMD_FIRM_RESET, 0, NULL);
+       ret = wait_for_start_done(core, resume);
+       if (ret) {
+               set_core_hang(core);
+               return -EINVAL;
+       }
+       core->reset_times++;
+
+       return 0;
+}
+
+static int process_core_hang(struct core_device *core)
+{
+       int ret;
+       int i;
+       int instance_count = 0;
+
+       if (!core->hang)
+               return 0;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (core->ctx[i])
+                       instance_count++;
+       }
+
+       if (instance_count)
+               return -EBUSY;
+
+       ret = sw_reset_firmware(core, 0);
+       if (ret)
+               return ret;
+
+       clear_core_hang(core);
+       return 0;
+}
+
+static void show_codec_configure(pMEDIAIP_ENC_PARAM param)
+{
+       if (!param)
+               return;
+
+       vpu_dbg(LVL_INFO, "Encoder Parameter:\n");
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Codec Mode", param->eCodecMode);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Profile", param->eProfile);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Level", param->uLevel);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Mem Phys Addr", param->tEncMemDesc.uMemPhysAddr);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Mem Virt Addr", param->tEncMemDesc.uMemVirtAddr);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Mem Size", param->tEncMemDesc.uMemSize);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Frame Rate", param->uFrameRate);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Stride", param->uSrcStride);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Width", param->uSrcWidth);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Height", param->uSrcHeight);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Offset x", param->uSrcOffset_x);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Offset y", param->uSrcOffset_y);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Crop Width", param->uSrcCropWidth);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Source Crop Height", param->uSrcCropHeight);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Out Width", param->uOutWidth);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Out Height", param->uOutHeight);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "I Frame Interval", param->uIFrameInterval);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "GOP Length", param->uGopBLength);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Low Latency Mode", param->uLowLatencyMode);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Bitrate Mode", param->eBitRateMode);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Target Bitrate", param->uTargetBitrate);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Min Bitrate", param->uMinBitRate);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "Max Bitrate", param->uMaxBitRate);
+       vpu_dbg(LVL_INFO, "\t%20s:%16d\n",
+                       "QP", param->uInitSliceQP);
+}
+
+static void show_firmware_version(struct core_device *core_dev,
+                               unsigned int level)
+{
+       pENC_RPC_HOST_IFACE pSharedInterface;
+
+       if (!core_dev)
+               return;
+
+       pSharedInterface = core_dev->shared_mem.pSharedInterface;
+
+       vpu_dbg(level, "vpu encoder core[%d] firmware version is %d.%d.%d\n",
+                       core_dev->id,
+                       (pSharedInterface->FWVersion & 0x00ff0000) >> 16,
+                       (pSharedInterface->FWVersion & 0x0000ff00) >> 8,
+                       pSharedInterface->FWVersion & 0x000000ff);
+}
+
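+/*
+ * Translate the negotiated OUTPUT/CAPTURE resolutions and the crop
+ * rectangle into the firmware encode parameters; the output size is the
+ * smaller of the CAPTURE resolution and the cropped source size.
+ */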
+static void update_encode_size(struct vpu_ctx *ctx)
+{
+       struct queue_data *src = NULL;
+       struct queue_data *dst = NULL;
+       struct vpu_attr *attr;
+       pMEDIAIP_ENC_PARAM  pEncParam;
+
+       if (!ctx)
+               return;
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return;
+
+       src = &ctx->q_data[V4L2_SRC];
+       dst = &ctx->q_data[V4L2_DST];
+       pEncParam = &attr->param;
+
+       pEncParam->uSrcStride           = src->width;
+       pEncParam->uSrcWidth            = src->width;
+       pEncParam->uSrcHeight           = src->height;
+       pEncParam->uSrcOffset_x         = src->rect.left;
+       pEncParam->uSrcOffset_y         = src->rect.top;
+       pEncParam->uSrcCropWidth        = src->rect.width;
+       pEncParam->uSrcCropHeight       = src->rect.height;
+       pEncParam->uOutWidth            = min(dst->width, src->rect.width);
+       pEncParam->uOutHeight           = min(dst->height, src->rect.height);
+}
+
+static void init_ctx_seq_info(struct vpu_ctx *ctx)
+{
+       int i;
+
+       if (!ctx)
+               return;
+
+       ctx->sequence = 0;
+       for (i = 0; i < ARRAY_SIZE(ctx->timestams); i++)
+               ctx->timestams[i] = VPU_ENC_INVALID_TIMESTAMP;
+}
+
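+/*
+ * Timestamps travel from OUTPUT to CAPTURE by sequence number: each queued
+ * YUV frame gets a sequence id and its timestamp is parked in a small ring
+ * (sequence % VPU_ENC_SEQ_CAPACITY); the encoded frame carries the same id
+ * back in uFrameID, which is used to look the timestamp up again.
+ */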
+static void fill_ctx_seq(struct vpu_ctx *ctx, struct vb2_data_req *p_data_req)
+{
+       u_int32 idx;
+
+       WARN_ON(!ctx || !p_data_req);
+
+       p_data_req->sequence = ctx->sequence++;
+       idx = p_data_req->sequence % VPU_ENC_SEQ_CAPACITY;
+       if (ctx->timestams[idx] != VPU_ENC_INVALID_TIMESTAMP) {
+               count_timestamp_overwrite(ctx);
+               vpu_dbg(LVL_FRAME, "[%d.%d][%d] overwrite timestamp\n",
+                       ctx->core_dev->id, ctx->str_index,
+                       p_data_req->sequence);
+       }
+       ctx->timestams[idx] = p_data_req->vb2_buf->timestamp;
+}
+
+static s64 get_ctx_seq_timestamp(struct vpu_ctx *ctx, u32 sequence)
+{
+       s64 timestamp;
+       u_int32 idx;
+
+       WARN_ON(!ctx);
+
+       idx = sequence % VPU_ENC_SEQ_CAPACITY;
+       timestamp = ctx->timestams[idx];
+       ctx->timestams[idx] = VPU_ENC_INVALID_TIMESTAMP;
+
+       return timestamp;
+}
+
+static void fill_vb_sequence(struct vb2_buffer *vb, u32 sequence)
+{
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+       vbuf->sequence = sequence;
+}
+
+static struct vb2_data_req *find_vb2_data_by_sequence(struct queue_data *queue,
+                                                       u32 sequence)
+{
+       int i;
+
+       for (i = 0; i < queue->vb2_q.num_buffers; i++) {
+               if (!queue->vb2_reqs[i].vb2_buf)
+                       continue;
+               if (queue->vb2_reqs[i].sequence == sequence)
+                       return &queue->vb2_reqs[i];
+       }
+
+       return NULL;
+}
+
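+/*
+ * One-shot codec configuration: allocate the stream ring buffer, point the
+ * RPC stream buffer descriptor and the expert-mode calibration window at it,
+ * copy the cached encode parameters into shared memory and then issue
+ * GTB_ENC_CMD_CONFIGURE_CODEC.
+ */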
+static int do_configure_codec(struct vpu_ctx *ctx)
+{
+       pBUFFER_DESCRIPTOR_TYPE pEncStrBuffDesc = NULL;
+       pMEDIAIP_ENC_EXPERT_MODE_PARAM pEncExpertModeParam = NULL;
+       pMEDIAIP_ENC_PARAM enc_param;
+       struct vpu_attr *attr;
+
+       if (!ctx || !ctx->core_dev)
+               return -EINVAL;
+
+       attr = get_vpu_ctx_attr(ctx);
+       if (!attr)
+               return -EINVAL;
+
+       if (vpu_enc_alloc_stream(ctx))
+               return -ENOMEM;
+
+       update_encode_size(ctx);
+
+       enc_param = get_rpc_enc_param(ctx);
+       pEncStrBuffDesc = get_rpc_stream_buffer_desc(ctx);
+
+       pEncStrBuffDesc->start = ctx->encoder_stream.phy_addr;
+       pEncStrBuffDesc->wptr = pEncStrBuffDesc->start;
+       pEncStrBuffDesc->rptr = pEncStrBuffDesc->start;
+       pEncStrBuffDesc->end = ctx->encoder_stream.phy_addr +
+                               ctx->encoder_stream.size;
+
+       vpu_dbg(LVL_DEBUG,
+               "pEncStrBuffDesc:start=0x%x, wptr=0x%x, rptr=0x%x, end=0x%x\n",
+               pEncStrBuffDesc->start,
+               pEncStrBuffDesc->wptr,
+               pEncStrBuffDesc->rptr,
+               pEncStrBuffDesc->end);
+
+       pEncExpertModeParam = get_rpc_expert_mode_param(ctx);
+       pEncExpertModeParam->Calib.mem_chunk_phys_addr = 0;
+       pEncExpertModeParam->Calib.mem_chunk_virt_addr = 0;
+       pEncExpertModeParam->Calib.mem_chunk_size = 0;
+       pEncExpertModeParam->Calib.cb_base = ctx->encoder_stream.phy_addr;
+       pEncExpertModeParam->Calib.cb_size = ctx->encoder_stream.size;
+
+       show_firmware_version(ctx->core_dev, LVL_INFO);
+       clear_stop_status(ctx);
+       memcpy(enc_param, &attr->param, sizeof(attr->param));
+       vpu_ctx_send_cmd(ctx, GTB_ENC_CMD_CONFIGURE_CODEC, 0, NULL);
+
+       show_codec_configure(enc_param);
+
+       return 0;
+}
+
+static int check_vpu_ctx_is_ready(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return false;
+
+       if (!test_bit(VPU_ENC_STATUS_OUTPUT_READY, &ctx->status))
+               return false;
+       if (!test_bit(VPU_ENC_STATUS_DATA_READY, &ctx->status))
+               return false;
+
+       return true;
+}
+
+static int configure_codec(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return -EINVAL;
+
+       if (!check_vpu_ctx_is_ready(ctx))
+               return 0;
+
+       if (ctx->core_dev->snapshot)
+               return 0;
+
+       if (test_bit(VPU_ENC_STATUS_SNAPSHOT, &ctx->status))
+               return 0;
+
+       if (!test_and_set_bit(VPU_ENC_STATUS_CONFIGURED, &ctx->status)) {
+               do_configure_codec(ctx);
+               clear_bit(VPU_ENC_STATUS_OUTPUT_READY, &ctx->status);
+               clear_bit(VPU_ENC_STATUS_DATA_READY, &ctx->status);
+       }
+
+       return 0;
+}
+
+static void dump_vb2_data(struct vb2_buffer *vb)
+{
+#ifdef DUMP_DATA
+       const int DATA_NUM = 10;
+       char *read_data;
+       u_int32 read_idx;
+       char data_str[1024];
+       int num = 0;
+
+       if (!vb)
+               return;
+
+       read_data = vb2_plane_vaddr(vb, 0);
+       num = scnprintf(data_str, sizeof(data_str),
+                       "transfer data from virt 0x%p: ", read_data);
+       for (read_idx = 0; read_idx < DATA_NUM; read_idx++)
+               num += scnprintf(data_str + num, sizeof(data_str) - num,
+                               " 0x%x", read_data[read_idx]);
+
+       vpu_dbg(LVL_DEBUG, "%s\n", data_str);
+#endif
+}
+
+static u32 get_vb2_plane_phy_addr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+       dma_addr_t *dma_addr;
+
+       dma_addr = vb2_plane_cookie(vb, plane_no);
+       return *dma_addr + vb->planes[plane_no].data_offset;
+}
+
+static void record_start_time(struct vpu_ctx *ctx, enum QUEUE_TYPE type)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       struct timespec ts;
+
+       if (!attr)
+               return;
+
+       if (attr->ts_start[type])
+               return;
+
+       getrawmonotonic(&ts);
+       attr->ts_start[type] = ts.tv_sec * MSEC_PER_SEC +
+                               ts.tv_nsec / NSEC_PER_MSEC;
+}
+
+static bool update_yuv_addr(struct vpu_ctx *ctx)
+{
+       bool bGotAFrame = FALSE;
+
+       struct vb2_data_req *p_data_req;
+       struct queue_data *This = &ctx->q_data[V4L2_SRC];
+
+       pMEDIAIP_ENC_YUV_BUFFER_DESC desc;
+
+       desc = get_rpc_yuv_buffer_desc(ctx);
+
+       if (list_empty(&This->drv_q))
+               return bGotAFrame;
+
+       p_data_req = list_first_entry(&This->drv_q, typeof(*p_data_req), list);
+
+       dump_vb2_data(p_data_req->vb2_buf);
+
+       desc->uLumaBase = get_vb2_plane_phy_addr(p_data_req->vb2_buf, 0);
+       desc->uChromaBase = get_vb2_plane_phy_addr(p_data_req->vb2_buf, 1);
+
+       if (desc->uLumaBase != 0)
+               bGotAFrame = TRUE;
+
+       /*
+        * uFrameID keeps increasing, so it is a frame input count
+        * rather than a frame buffer ID
+        */
+       desc->uFrameID = p_data_req->sequence;
+       if (test_and_clear_bit(VPU_ENC_STATUS_KEY_FRAME, &ctx->status))
+               desc->uKeyFrame = 1;
+       else
+               desc->uKeyFrame = 0;
+       list_del(&p_data_req->list);
+
+       return bGotAFrame;
+}
+
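+/*
+ * Classic KMP substring search, used to find NAL start codes and the
+ * end-of-stream filler pattern inside the encoded stream: get_kmp_next()
+ * builds the failure table and kmp_serach() returns the match offset or -1.
+ */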
+static void get_kmp_next(const u8 *p, int *next, int size)
+{
+       int k = -1;
+       int j = 0;
+
+       next[0] = -1;
+       while (j < size - 1) {
+               if (k == -1 || p[j] == p[k]) {
+                       ++k;
+                       ++j;
+                       next[j] = k;
+               } else {
+                       k = next[k];
+               }
+       }
+}
+
+static int kmp_serach(u8 *s, int s_len, const u8 *p, int p_len, int *next)
+{
+       int i = 0;
+       int j = 0;
+
+       while (i < s_len && j < p_len) {
+               if (j == -1 || s[i] == p[j]) {
+                       i++;
+                       j++;
+               } else {
+                       j = next[j];
+               }
+       }
+       if (j == p_len)
+               return i - j;
+       else
+               return -1;
+}
+
+static int get_stuff_data_size(u8 *data, int size)
+{
+       const u8 pattern[] = VPU_STRM_END_PATTERN;
+       int next[] = VPU_STRM_END_PATTERN;
+       int index;
+
+       if (size < ARRAY_SIZE(pattern))
+               return 0;
+
+       get_kmp_next(pattern, next, ARRAY_SIZE(pattern));
+       index =  kmp_serach(data, size, pattern, ARRAY_SIZE(pattern), next);
+       if (index < 0)
+               return 0;
+       vpu_dbg(LVL_DEBUG, "find end_of_stream nal\n");
+       return size - index;
+}
+
+static void count_strip_info(struct vpu_strip_info *info, u32 bytes)
+{
+       if (!info)
+               return;
+
+       info->count++;
+       info->total += bytes;
+       if (info->max < bytes)
+               info->max = bytes;
+}
+
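+/*
+ * The firmware may append end-of-stream stuffing behind the last payload;
+ * search the tail of the buffer for VPU_STRM_END_PATTERN and trim it so
+ * user space only sees real frame data.
+ */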
+static void strip_stuff_data_on_tail(struct vpu_ctx *ctx, struct vb2_buffer *vb)
+{
+       u8 *ptr = vb2_plane_vaddr(vb, 0);
+       unsigned long bytesused = vb2_get_plane_payload(vb, 0);
+       int count = VPU_TAIL_SERACH_SIZE;
+       int stuff_size;
+
+       if (count > bytesused)
+               count = bytesused;
+
+       if (!count)
+               return;
+
+       stuff_size = get_stuff_data_size(ptr + bytesused - count, count);
+       if (stuff_size) {
+               struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+               if (attr)
+                       count_strip_info(&attr->statistic.strip_sts.eos,
+                                       stuff_size);
+
+               vpu_dbg(LVL_DEBUG, "strip %d bytes stuff data\n", stuff_size);
+               vb2_set_plane_payload(vb, 0, bytesused - stuff_size);
+       }
+}
+
+static int check_enc_rw_flag(int flag)
+{
+       int ret = -EINVAL;
+
+       switch (flag) {
+       case VPU_ENC_FLAG_WRITEABLE:
+       case VPU_ENC_FLAG_READABLE:
+               ret = 0;
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static void set_queue_rw_flag(struct queue_data *queue, int flag)
+{
+       if (!queue)
+               return;
+
+       if (check_enc_rw_flag(flag))
+               return;
+
+       set_bit(flag, &queue->rw_flag);
+}
+
+static void clear_queue_rw_flag(struct queue_data *queue, int flag)
+{
+       if (!queue)
+               return;
+
+       if (check_enc_rw_flag(flag))
+               return;
+
+       clear_bit(flag, &queue->rw_flag);
+}
+
+static int submit_input_and_encode(struct vpu_ctx *ctx)
+{
+       struct queue_data *queue;
+
+       if (!ctx)
+               return -EINVAL;
+
+       queue = &ctx->q_data[V4L2_SRC];
+
+       down(&queue->drv_q_lock);
+
+       if (!test_bit(VPU_ENC_FLAG_WRITEABLE, &queue->rw_flag))
+               goto exit;
+
+       if (list_empty(&queue->drv_q))
+               goto exit;
+
+       if (test_bit(VPU_ENC_STATUS_STOP_SEND, &ctx->status))
+               goto exit;
+       if (!test_bit(VPU_ENC_STATUS_START_DONE, &ctx->status))
+               goto exit;
+
+       if (update_yuv_addr(ctx)) {
+               vpu_ctx_send_cmd(ctx, GTB_ENC_CMD_FRAME_ENCODE, 0, NULL);
+               clear_queue_rw_flag(queue, VPU_ENC_FLAG_WRITEABLE);
+               record_start_time(ctx, V4L2_SRC);
+       }
+exit:
+       up(&queue->drv_q_lock);
+
+       return 0;
+}
+
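+/*
+ * The encoded stream lives in a circular buffer [start, end); the helpers
+ * below advance and wrap the read pointer and compute how many bytes of a
+ * frame are available between rptr and wptr.
+ */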
+static void add_rptr(struct vpu_frame_info *frame, u32 length)
+{
+       WARN_ON(!frame);
+
+       frame->rptr += length;
+       if (frame->rptr >= frame->end)
+               frame->rptr -= (frame->end - frame->start);
+}
+
+static void report_frame_type(struct vb2_data_req *p_data_req,
+                               struct vpu_frame_info *frame)
+{
+       WARN_ON(!p_data_req || !frame);
+
+       switch (frame->info.ePicType) {
+       case MEDIAIP_ENC_PIC_TYPE_IDR_FRAME:
+       case MEDIAIP_ENC_PIC_TYPE_I_FRAME:
+               p_data_req->buffer_flags = V4L2_BUF_FLAG_KEYFRAME;
+               break;
+       case MEDIAIP_ENC_PIC_TYPE_P_FRAME:
+               p_data_req->buffer_flags = V4L2_BUF_FLAG_PFRAME;
+               break;
+       case MEDIAIP_ENC_PIC_TYPE_B_FRAME:
+               p_data_req->buffer_flags = V4L2_BUF_FLAG_BFRAME;
+               break;
+       default:
+               break;
+       }
+}
+
+static u32 calc_frame_length(struct vpu_frame_info *frame)
+{
+       u32 length;
+       u32 buffer_size;
+
+       WARN_ON(!frame);
+
+       if (frame->eos)
+               return 0;
+
+       buffer_size = frame->end - frame->start;
+       if (!buffer_size)
+               return 0;
+
+       length = (buffer_size + frame->wptr - frame->rptr) % buffer_size;
+
+       return length;
+}
+
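+/*
+ * The firmware reports stream buffer pointers without the top address bit;
+ * OR-ing 0x80000000 back in appears to restore the CPU view of the DDR
+ * alias the buffers were allocated from (assumption based on usage).
+ */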
+static u32 get_ptr(u32 ptr)
+{
+       return (ptr | 0x80000000);
+}
+
+static void *get_rptr_virt(struct vpu_ctx *ctx, struct vpu_frame_info *frame)
+{
+       WARN_ON(!ctx || !frame);
+
+       return ctx->encoder_stream.virt_addr + frame->rptr - frame->start;
+}
+
+static int find_nal_begin(u8 *data, u32 size)
+{
+       const u8 pattern[] = VPU_STRM_BEGIN_PATTERN;
+       int next[] = VPU_STRM_BEGIN_PATTERN;
+       u32 len;
+       int index;
+
+       len = ARRAY_SIZE(pattern);
+       get_kmp_next(pattern, next, len);
+       index = kmp_serach(data, size, pattern, len, next);
+       if (index > 0 && data[index - 1] == 0)
+               index--;
+
+       return index;
+}
+
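+/*
+ * Look for the next NAL start code from the current read position,
+ * handling the wrap of the circular buffer; when 'skip' is set the bytes
+ * in front of the start code are discarded from the frame.
+ */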
+static int find_frame_start_and_skip(struct vpu_ctx *ctx,
+                       struct vpu_frame_info *frame, int skip)
+{
+       u32 length;
+       u32 bytesskiped = 0;
+       u8 *data = get_rptr_virt(ctx, frame);
+       int index;
+
+       length = frame->bytesleft;
+       if (frame->rptr + length <= frame->end) {
+               index = find_nal_begin(data, length);
+               if (index >= 0)
+                       bytesskiped += index;
+               else
+                       bytesskiped += length;
+       } else {
+               u32 size = frame->end - frame->rptr;
+
+               index = find_nal_begin(data, size);
+               if (index >= 0) {
+                       bytesskiped += index;
+               } else {
+                       bytesskiped += size;
+
+                       data = ctx->encoder_stream.virt_addr;
+                       size = length - size;
+                       index = find_nal_begin(data, size);
+                       if (index >= 0)
+                               bytesskiped += index;
+                       else
+                               bytesskiped += size;
+               }
+       }
+
+       if (skip && bytesskiped) {
+               add_rptr(frame, bytesskiped);
+               frame->bytesleft -= bytesskiped;
+       }
+
+       return bytesskiped;
+}
+
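+/*
+ * Copy one encoded frame out of the stream ring buffer into the CAPTURE
+ * vb2 buffer; a frame that wraps around the end of the ring is copied in
+ * two chunks.  The stuffing on the tail is stripped once the whole frame
+ * has been transferred.
+ */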
+static int transfer_stream_output(struct vpu_ctx *ctx,
+                                       struct vpu_frame_info *frame,
+                                       struct vb2_data_req *p_data_req)
+{
+       struct vb2_buffer *vb = NULL;
+       u32 length;
+       void *pdst;
+
+       WARN_ON(!ctx || !frame || !p_data_req);
+
+       length = frame->bytesleft;
+
+       vb = p_data_req->vb2_buf;
+       if (length > vb->planes[0].length)
+               length = vb->planes[0].length;
+       vb2_set_plane_payload(vb, 0, length);
+
+       pdst = vb2_plane_vaddr(vb, 0);
+       if (frame->rptr + length <= frame->end) {
+               memcpy(pdst, get_rptr_virt(ctx, frame), length);
+               frame->bytesleft -= length;
+               add_rptr(frame, length);
+       } else {
+               u32 offset = frame->end - frame->rptr;
+
+               memcpy(pdst, get_rptr_virt(ctx, frame), offset);
+               frame->bytesleft -= offset;
+               add_rptr(frame, offset);
+               length -= offset;
+               memcpy(pdst + offset, get_rptr_virt(ctx, frame), length);
+               frame->bytesleft -= length;
+               add_rptr(frame, length);
+       }
+       report_frame_type(p_data_req, frame);
+       if (frame->bytesleft)
+               return 0;
+
+       strip_stuff_data_on_tail(ctx, p_data_req->vb2_buf);
+
+       return 0;
+}
+
+static int append_empty_end_frame(struct vb2_data_req *p_data_req)
+{
+       struct vb2_buffer *vb = NULL;
+       const u8 pattern[] = VPU_STRM_END_PATTERN;
+       void *pdst;
+
+       WARN_ON(!p_data_req);
+
+       vb = p_data_req->vb2_buf;
+       pdst = vb2_plane_vaddr(vb, 0);
+       memcpy(pdst, pattern, ARRAY_SIZE(pattern));
+
+       vb2_set_plane_payload(vb, 0, ARRAY_SIZE(pattern));
+       p_data_req->buffer_flags = V4L2_BUF_FLAG_LAST;
+
+       vpu_dbg(LVL_INFO, "append the last frame\n");
+
+       return 0;
+}
+
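+/*
+ * A read position is valid if it lies inside [start, end) and between
+ * rptr and wptr, taking the possible wrap of the ring into account.
+ */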
+static bool is_valid_frame_read_pos(u32 ptr, struct vpu_frame_info *frame)
+{
+       if (ptr < frame->start || ptr >= frame->end)
+               return false;
+       if (ptr >= frame->rptr && ptr < frame->wptr)
+               return true;
+       if (frame->rptr > frame->wptr) {
+               if (ptr >= frame->rptr || ptr < frame->wptr)
+                       return true;
+       }
+
+       return false;
+}
+
+static int precheck_frame(struct vpu_ctx *ctx, struct vpu_frame_info *frame)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       u32 length;
+       int bytesskiped;
+       u32 rptr;
+
+       if (frame->eos)
+               return 0;
+       if (!frame->is_start)
+               return 0;
+
+       add_rptr(frame, 0);
+       frame->is_start = false;
+       rptr = get_ptr(frame->info.uStrBuffWrPtr);
+       if (rptr == frame->end)
+               rptr = frame->start;
+
+       if (is_valid_frame_read_pos(rptr, frame)) {
+               if (rptr != frame->rptr) {
+                       vpu_dbg(LVL_DEBUG, "frame skip %d bytes\n",
+                                       rptr - frame->rptr);
+                       count_strip_info(&attr->statistic.strip_sts.fw,
+                                       rptr - frame->rptr);
+               }
+               frame->rptr = rptr;
+       } else {
+               vpu_err("[%ld]wrong uStrBuffWrPtr:0x%x\n", frame->index, rptr);
+       }
+
+       length = calc_frame_length(frame);
+       if (!length || length < frame->bytesleft) {
+               vpu_err("[%d][%d] frame[%ld] invalid, expect %d bytes but only %d available, drop\n",
+                               ctx->core_dev->id, ctx->str_index,
+                               frame->index, frame->bytesleft, length);
+               vpu_err("uStrBuffWrPtr = 0x%x, uFrameSize = 0x%x\n",
+                       frame->info.uStrBuffWrPtr, frame->info.uFrameSize);
+               add_rptr(frame, length);
+               return -EINVAL;
+       }
+
+       bytesskiped = find_frame_start_and_skip(ctx, frame, 0);
+       if (!bytesskiped)
+               return 0;
+
+       if (attr)
+               count_strip_info(&attr->statistic.strip_sts.begin, bytesskiped);
+
+       return 0;
+}
+
+static int inc_frame(struct queue_data *queue)
+{
+       struct vpu_frame_info *frame = NULL;
+
+       if (!queue)
+               return -EINVAL;
+
+       frame = vzalloc(sizeof(*frame));
+       if (!frame)
+               return -EINVAL;
+
+       frame->queue = queue;
+       list_add_tail(&frame->list, &queue->frame_idle);
+       atomic64_inc(&queue->frame_count);
+
+       vpu_dbg(LVL_DEBUG, "++ frame : %ld\n",
+                       atomic64_read(&queue->frame_count));
+
+       return 0;
+}
+
+static void dec_frame(struct vpu_frame_info *frame)
+{
+       if (!frame)
+               return;
+       list_del_init(&frame->list);
+       if (frame->queue) {
+               atomic64_dec(&frame->queue->frame_count);
+
+               vpu_dbg(LVL_DEBUG, "-- frame : %ld\n",
+                               atomic64_read(&frame->queue->frame_count));
+       }
+       VPU_SAFE_RELEASE(frame, vfree);
+}
+
+static struct vpu_frame_info *get_idle_frame(struct queue_data *queue)
+{
+       struct vpu_frame_info *frame = NULL;
+
+       if (!queue)
+               return NULL;
+
+       if (list_empty(&queue->frame_idle))
+               inc_frame(queue);
+       frame = list_first_entry(&queue->frame_idle,
+                               struct vpu_frame_info, list);
+       if (frame)
+               list_del_init(&frame->list);
+
+       return frame;
+}
+
+static void put_frame_idle(struct vpu_frame_info *frame)
+{
+       struct queue_data *queue;
+
+       if (!frame)
+               return;
+
+       list_del_init(&frame->list);
+       memset(&frame->info, 0, sizeof(frame->info));
+       frame->bytesleft = 0;
+       frame->wptr = 0;
+       frame->rptr = 0;
+       frame->start = 0;
+       frame->end = 0;
+       frame->eos = false;
+       frame->is_start = false;
+       frame->index = 0;
+       frame->timestamp = VPU_ENC_INVALID_TIMESTAMP;
+       queue = frame->queue;
+       if (queue && atomic64_read(&queue->frame_count) <= FRAME_COUNT_THD)
+               list_add_tail(&frame->list, &queue->frame_idle);
+       else
+               dec_frame(frame);
+}
+
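+/*
+ * Pair the oldest pending encoded frame with the first queued CAPTURE
+ * buffer: copy the bitstream (or append the empty EOS buffer flagged
+ * V4L2_BUF_FLAG_LAST), update the shared read pointer and complete the
+ * vb2 buffer.  Returns true when it made progress, so the caller loops.
+ */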
+static bool process_frame_done(struct queue_data *queue)
+{
+       struct vpu_ctx *ctx;
+       struct vb2_data_req *p_data_req = NULL;
+       struct vpu_frame_info *frame = NULL;
+       pBUFFER_DESCRIPTOR_TYPE stream_buffer_desc;
+
+       WARN_ON(!queue || !queue->ctx);
+
+       ctx = queue->ctx;
+
+       stream_buffer_desc = get_rpc_stream_buffer_desc(ctx);
+
+       if (list_empty(&queue->frame_q))
+               return false;
+
+       frame = list_first_entry(&queue->frame_q, typeof(*frame), list);
+       if (!frame)
+               return false;
+
+       frame->rptr = get_ptr(stream_buffer_desc->rptr);
+
+       if (precheck_frame(ctx, frame)) {
+               stream_buffer_desc->rptr = frame->rptr;
+               put_frame_idle(frame);
+               frame = NULL;
+               return true;
+       }
+
+       if (list_empty(&queue->drv_q))
+               return false;
+
+       p_data_req = list_first_entry(&queue->drv_q, typeof(*p_data_req), list);
+
+       if (frame->eos)
+               append_empty_end_frame(p_data_req);
+       else
+               transfer_stream_output(ctx, frame, p_data_req);
+
+       stream_buffer_desc->rptr = frame->rptr;
+       if (!frame->eos) {
+               fill_vb_sequence(p_data_req->vb2_buf, frame->info.uFrameID);
+               p_data_req->vb2_buf->timestamp = frame->timestamp;
+       }
+       if (!frame->bytesleft) {
+               put_frame_idle(frame);
+               frame = NULL;
+       }
+       list_del(&p_data_req->list);
+
+       if (p_data_req->vb2_buf->state == VB2_BUF_STATE_ACTIVE)
+               vb2_buffer_done(p_data_req->vb2_buf, VB2_BUF_STATE_DONE);
+       else
+               vpu_err("vb2_buf's state is invalid(%d)\n",
+                               p_data_req->vb2_buf->state);
+
+       return true;
+}
+
+static int process_stream_output(struct vpu_ctx *ctx)
+{
+       struct queue_data *queue = NULL;
+
+       if (!ctx)
+               return -EINVAL;
+
+       queue = &ctx->q_data[V4L2_DST];
+
+       down(&queue->drv_q_lock);
+       while (1) {
+               if (!process_frame_done(queue))
+                       break;
+       }
+       up(&queue->drv_q_lock);
+
+       return 0;
+}
+
+static void show_enc_pic_info(MEDIAIP_ENC_PIC_INFO *pEncPicInfo)
+{
+#ifdef TB_REC_DBG
+       vpu_dbg(LVL_DEBUG, "       - Frame ID      : 0x%x\n",
+                       pEncPicInfo->uFrameID);
+
+       switch (pEncPicInfo->ePicType) {
+       case MEDIAIP_ENC_PIC_TYPE_IDR_FRAME:
+               vpu_dbg(LVL_DEBUG, "       - Picture Type  : IDR picture\n");
+               break;
+       case MEDIAIP_ENC_PIC_TYPE_I_FRAME:
+               vpu_dbg(LVL_DEBUG, "       - Picture Type  : I picture\n");
+               break;
+       case MEDIAIP_ENC_PIC_TYPE_P_FRAME:
+               vpu_dbg(LVL_DEBUG, "       - Picture Type  : P picture\n");
+               break;
+       case MEDIAIP_ENC_PIC_TYPE_B_FRAME:
+               vpu_dbg(LVL_DEBUG, "       - Picture Type  : B picture\n");
+               break;
+       default:
+               vpu_dbg(LVL_DEBUG, "       - Picture Type  : BI picture\n");
+               break;
+       }
+       vpu_dbg(LVL_DEBUG, "       - Skipped frame : 0x%x\n",
+                       pEncPicInfo->uSkippedFrame);
+       vpu_dbg(LVL_DEBUG, "       - Frame size    : 0x%x\n",
+                       pEncPicInfo->uFrameSize);
+       vpu_dbg(LVL_DEBUG, "       - Frame CRC     : 0x%x\n",
+                       pEncPicInfo->uFrameCrc);
+#endif
+}
+
+static int handle_event_start_done(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return -EINVAL;
+
+       set_bit(VPU_ENC_STATUS_START_DONE, &ctx->status);
+       set_queue_rw_flag(&ctx->q_data[V4L2_SRC], VPU_ENC_FLAG_WRITEABLE);
+       submit_input_and_encode(ctx);
+
+       enable_fps_sts(get_vpu_ctx_attr(ctx));
+
+       return 0;
+}
+
+static int handle_event_mem_request(struct vpu_ctx *ctx,
+                               MEDIAIP_ENC_MEM_REQ_DATA *req_data)
+{
+       int ret;
+
+       if (!ctx || !req_data)
+               return -EINVAL;
+
+       ret = vpu_enc_alloc_mem(ctx, req_data, get_rpc_mem_pool(ctx));
+       if (ret) {
+               vpu_err("fail to alloc encoder memory\n");
+               return ret;
+       }
+       vpu_ctx_send_cmd(ctx, GTB_ENC_CMD_STREAM_START, 0, NULL);
+       set_bit(VPU_ENC_STATUS_START_SEND, &ctx->status);
+
+       return 0;
+}
+
+static int handle_event_frame_done(struct vpu_ctx *ctx,
+                               MEDIAIP_ENC_PIC_INFO *pEncPicInfo)
+{
+       struct queue_data *queue;
+       struct vpu_frame_info *frame;
+       pBUFFER_DESCRIPTOR_TYPE stream_buffer_desc;
+       s64 timestamp;
+
+       if (!ctx || !pEncPicInfo)
+               return -EINVAL;
+
+       vpu_dbg(LVL_DEBUG, "Frame done(%d) - uFrameID = %d\n",
+                       pEncPicInfo->uPicEncodDone, pEncPicInfo->uFrameID);
+
+       queue = &ctx->q_data[V4L2_DST];
+       if (!pEncPicInfo->uPicEncodDone) {
+               vpu_err("Pic Encoder Not Done\n");
+               return -EINVAL;
+       }
+
+       stream_buffer_desc = get_rpc_stream_buffer_desc(ctx);
+       if (stream_buffer_desc->rptr < stream_buffer_desc->start ||
+                       stream_buffer_desc->rptr > stream_buffer_desc->end ||
+                       stream_buffer_desc->wptr < stream_buffer_desc->start ||
+                       stream_buffer_desc->wptr > stream_buffer_desc->end ||
+                       stream_buffer_desc->end - stream_buffer_desc->start !=
+                       ctx->encoder_stream.size) {
+               vpu_err("stream buffer desc is invalid, s:%x,e:%x,r:%x,w:%x\n",
+                               stream_buffer_desc->start,
+                               stream_buffer_desc->end,
+                               stream_buffer_desc->rptr,
+                               stream_buffer_desc->wptr);
+               return -EINVAL;
+       }
+
+       show_enc_pic_info(pEncPicInfo);
+       record_start_time(ctx, V4L2_DST);
+
+       timestamp = get_ctx_seq_timestamp(ctx, pEncPicInfo->uFrameID);
+
+       down(&queue->drv_q_lock);
+       frame = get_idle_frame(queue);
+       if (frame) {
+               struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+               memcpy(&frame->info, pEncPicInfo, sizeof(frame->info));
+               frame->bytesleft = frame->info.uFrameSize;
+               frame->wptr = get_ptr(stream_buffer_desc->wptr);
+               frame->rptr = get_ptr(stream_buffer_desc->rptr);
+               frame->start = get_ptr(stream_buffer_desc->start);
+               frame->end = get_ptr(stream_buffer_desc->end);
+               frame->eos = false;
+               frame->is_start = true;
+               frame->timestamp = timestamp;
+               if (attr)
+                       frame->index = attr->statistic.encoded_count;
+
+               list_add_tail(&frame->list, &queue->frame_q);
+       } else {
+               vpu_err("fail to alloc memory for frame info\n");
+       }
+       up(&queue->drv_q_lock);
+       count_encoded_frame(ctx);
+
+       /* Sync the write pointer to the local view of it */
+       process_stream_output(ctx);
+
+       return 0;
+}
+
+static int handle_event_frame_release(struct vpu_ctx *ctx, u_int32 *uFrameID)
+{
+       struct queue_data *This = &ctx->q_data[V4L2_SRC];
+       struct vb2_data_req *p_data_req = NULL;
+
+       if (!ctx || !uFrameID)
+               return -EINVAL;
+
+       This = &ctx->q_data[V4L2_SRC];
+       vpu_dbg(LVL_DEBUG, "Frame release - uFrameID = %d\n", *uFrameID);
+       p_data_req = find_vb2_data_by_sequence(This, *uFrameID);
+       if (!p_data_req) {
+               vpu_err("uFrameID[%d] is invalid\n", *uFrameID);
+               return -EINVAL;
+       }
+       if (p_data_req->vb2_buf->state == VB2_BUF_STATE_ACTIVE)
+               vb2_buffer_done(p_data_req->vb2_buf, VB2_BUF_STATE_DONE);
+
+       return 0;
+}
+
+static int handle_event_stop_done(struct vpu_ctx *ctx)
+{
+       struct queue_data *queue;
+       struct vpu_frame_info *frame;
+
+       WARN_ON(!ctx);
+       queue = &ctx->q_data[V4L2_DST];
+
+       disable_fps_sts(get_vpu_ctx_attr(ctx));
+
+       set_bit(VPU_ENC_STATUS_STOP_DONE, &ctx->status);
+       notify_eos(ctx);
+
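+       /* queue an eos-marked frame so the stream output path can finish */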
+       down(&queue->drv_q_lock);
+       frame = get_idle_frame(queue);
+       if (frame) {
+               frame->eos = true;
+               list_add_tail(&frame->list, &queue->frame_q);
+       } else {
+               vpu_err("fail to alloc memory for last frame\n");
+       }
+       up(&queue->drv_q_lock);
+
+       process_stream_output(ctx);
+
+       clear_start_status(ctx);
+       init_ctx_seq_info(ctx);
+       complete(&ctx->stop_cmp);
+
+       return 0;
+}
+
+static void vpu_enc_event_handler(struct vpu_ctx *ctx,
+                               u_int32 uEvent, u_int32 *event_data)
+{
+       vpu_log_event(uEvent, ctx->str_index);
+       count_event(ctx, uEvent);
+       vpu_enc_check_mem_overstep(ctx);
+
+       switch (uEvent) {
+       case VID_API_ENC_EVENT_START_DONE:
+               handle_event_start_done(ctx);
+               break;
+       case VID_API_ENC_EVENT_MEM_REQUEST:
+               handle_event_mem_request(ctx,
+                               (MEDIAIP_ENC_MEM_REQ_DATA *)event_data);
+               break;
+       case VID_API_ENC_EVENT_PARA_UPD_DONE:
+               break;
+       case VID_API_ENC_EVENT_FRAME_DONE:
+               handle_event_frame_done(ctx,
+                                       (MEDIAIP_ENC_PIC_INFO *)event_data);
+               break;
+       case VID_API_ENC_EVENT_FRAME_RELEASE:
+               handle_event_frame_release(ctx, (u_int32 *)event_data);
+               break;
+       case VID_API_ENC_EVENT_STOP_DONE:
+               handle_event_stop_done(ctx);
+               break;
+       case VID_API_ENC_EVENT_FRAME_INPUT_DONE:
+               set_queue_rw_flag(&ctx->q_data[V4L2_SRC],
+                               VPU_ENC_FLAG_WRITEABLE);
+               response_stop_stream(ctx);
+               submit_input_and_encode(ctx);
+               break;
+       case VID_API_ENC_EVENT_TERMINATE_DONE:
+               break;
+       case VID_API_ENC_EVENT_RESET_DONE:
+               break;
+       case VID_API_ENC_EVENT_FIRMWARE_XCPT:
+               vpu_err("firmware exception:%s\n", (char *)event_data);
+               break;
+       default:
+               vpu_err("unknown event: 0x%x\n", uEvent);
+               break;
+       }
+}
+
+static void enable_mu(struct core_device *dev)
+{
+       u32 mu_addr;
+
+       vpu_dbg(LVL_ALL, "enable mu for core[%d]\n", dev->id);
+
+       rpc_init_shared_memory_encoder(&dev->shared_mem,
+                               cpu_phy_to_mu(dev, dev->m0_rpc_phy),
+                               dev->m0_rpc_virt, dev->rpc_buf_size,
+                               &dev->rpc_actual_size);
+       rpc_set_system_cfg_value_encoder(dev->shared_mem.pSharedInterface,
+                               dev->vdev->reg_rpc_system, dev->id);
+
+       if (dev->rpc_actual_size > dev->rpc_buf_size)
+               vpu_err("rpc actual size(0x%x) > buf size(0x%x), may overflow\n",
+                       dev->rpc_actual_size, dev->rpc_buf_size);
+
+       mu_addr = cpu_phy_to_mu(dev, dev->m0_rpc_phy + dev->rpc_buf_size);
+       rpc_set_print_buffer(&dev->shared_mem, mu_addr, dev->print_buf_size);
+       dev->print_buf = dev->m0_rpc_virt + dev->rpc_buf_size;
+
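+       /*
+        * Hand the RPC buffer location, the firmware boot address and an
+        * init-done flag to the firmware through the MU mailbox.
+        */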
+       mu_addr = cpu_phy_to_mu(dev, dev->m0_rpc_phy);
+       MU_sendMesgToFW(dev->mu_base_virtaddr, RPC_BUF_OFFSET, mu_addr);
+
+       MU_sendMesgToFW(dev->mu_base_virtaddr, BOOT_ADDRESS,
+                       dev->m0_p_fw_space_phy);
+       MU_sendMesgToFW(dev->mu_base_virtaddr, INIT_DONE, 2);
+}
+
+static void get_core_supported_instance_count(struct core_device *core)
+{
+       pENC_RPC_HOST_IFACE iface;
+
+       iface = core->shared_mem.pSharedInterface;
+       core->supported_instance_count =
+               min_t(u32, iface->uMaxEncoderStreams, VID_API_NUM_STREAMS);
+}
+
+static int re_configure_codecs(struct core_device *core)
+{
+       int i;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               struct vpu_ctx *ctx = core->ctx[i];
+               struct queue_data *queue;
+
+               if (!ctx || !test_bit(VPU_ENC_STATUS_INITIALIZED, &ctx->status))
+                       continue;
+
+               mutex_lock(&ctx->instance_mutex);
+               queue = &ctx->q_data[V4L2_SRC];
+               if (!list_empty(&queue->drv_q)) {
+                       vpu_dbg(LVL_INFO,
+                               "re configure codec for core[%d]\n", core->id);
+                       configure_codec(ctx);
+                       submit_input_and_encode(ctx);
+               }
+               mutex_unlock(&ctx->instance_mutex);
+       }
+
+       return 0;
+}
+
+static int wait_for_start_done(struct core_device *core, int resume)
+{
+       int ret;
+
+       if (!core)
+               return -EINVAL;
+
+       ret = wait_for_completion_timeout(&core->start_cmp,
+                                               msecs_to_jiffies(1000));
+       if (!ret) {
+               vpu_err("error: wait for core[%d] %s done timeout!\n",
+                               core->id, resume ? "resume" : "start");
+               return -EINVAL;
+       }
+
+       if (resume)
+               re_configure_codecs(core);
+
+       return 0;
+}
+
+static void vpu_core_start_done(struct core_device *core)
+{
+       if (!core)
+               return;
+
+       get_core_supported_instance_count(core);
+       core->firmware_started = true;
+       complete(&core->start_cmp);
+
+       show_firmware_version(core, LVL_ALL);
+}
+
+/* MU message interrupt handler */
+static irqreturn_t fsl_vpu_enc_mu_isr(int irq, void *This)
+{
+       struct core_device *dev = This;
+       u32 msg;
+
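+       /*
+        * Handshake values from the firmware: 0xaa asks the host to set up
+        * the RPC/MU shared memory, 0x55 signals firmware start done, 0xA5
+        * signals snapshot done; anything else means RPC messages are pending.
+        */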
+       MU_ReceiveMsg(dev->mu_base_virtaddr, 0, &msg);
+       if (msg == 0xaa) {
+               enable_mu(dev);
+       } else if (msg == 0x55) {
+               vpu_core_start_done(dev);
+       } else if (msg == 0xA5) {
+               /* snapshot done: wake up the waiter so suspend can continue */
+               complete(&dev->snap_done_cmp);
+       } else {
+               queue_work(dev->workqueue, &dev->msg_work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* Initialization of the MU code. */
+static int vpu_enc_mu_init(struct core_device *core_dev)
+{
+       int ret = 0;
+
+       core_dev->mu_base_virtaddr =
+               core_dev->vdev->regs_base + core_dev->reg_base;
+       WARN_ON(!core_dev->mu_base_virtaddr);
+
+       vpu_dbg(LVL_INFO, "core[%d] irq : %d\n", core_dev->id, core_dev->irq);
+
+       ret = devm_request_irq(core_dev->generic_dev, core_dev->irq,
+                               fsl_vpu_enc_mu_isr,
+                               IRQF_EARLY_RESUME,
+                               "vpu_mu_isr",
+                               (void *)core_dev);
+       if (ret) {
+               vpu_err("request_irq failed %d, error = %d\n",
+                               core_dev->irq, ret);
+               return -EINVAL;
+       }
+
+       if (!core_dev->vpu_mu_init) {
+               MU_Init(core_dev->mu_base_virtaddr);
+               MU_EnableRxFullInt(core_dev->mu_base_virtaddr, 0);
+               core_dev->vpu_mu_init = 1;
+       }
+
+       return ret;
+}
+
+static struct vpu_ctx *get_ctx_by_index(struct core_device *core, int index)
+{
+       struct vpu_ctx *ctx = NULL;
+
+       if (!core)
+               return NULL;
+
+       if (index < 0 || index >= core->supported_instance_count)
+               return NULL;
+
+       ctx = core->ctx[index];
+       if (!ctx)
+               return NULL;
+
+       if (!test_bit(VPU_ENC_STATUS_INITIALIZED, &ctx->status)) {
+               vpu_err("core[%d]'s ctx[%d] is not initialized\n",
+                               core->id, index);
+               return NULL;
+       }
+
+       if (test_bit(VPU_ENC_STATUS_CLOSED, &ctx->status)) {
+               vpu_err("core[%d]'s ctx[%d] is closed\n",
+                               core->id, index);
+               return NULL;
+       }
+
+       return ctx;
+}
+
+static int process_ctx_msg(struct vpu_ctx *ctx, struct msg_header *header)
+{
+       int ret = 0;
+       struct vpu_event_msg *msg = NULL;
+       u32 *pdata = NULL;
+
+       if (!ctx || !header)
+               return -EINVAL;
+
+       if (ctx->ctx_released)
+               return -EINVAL;
+
+       msg = get_idle_msg(ctx);
+       if (!msg) {
+               vpu_err("get idle msg fail\n");
+               return -ENOMEM;
+       }
+
+       msg->idx = header->idx;
+       msg->msgid = header->msgid;
+       msg->number = header->msgnum;
+       pdata = msg->data;
+       ret = 0;
+       if (msg->number > ARRAY_SIZE(msg->data)) {
+               ret = alloc_msg_ext_buffer(msg, header->msgnum);
+               pdata = msg->ext_data;
+       }
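+       /*
+        * Always drain the message from the RPC ring, even if the extended
+        * buffer allocation failed (pdata may be NULL in that case, which
+        * discards the payload), so the shared message queue does not stall.
+        */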
+       rpc_read_msg_array(&ctx->core_dev->shared_mem, pdata, msg->number);
+       if (ret) {
+               put_idle_msg(ctx, msg);
+               return ret;
+       }
+
+       push_back_event_msg(ctx, msg);
+
+       return ret;
+}
+
+static int process_msg(struct core_device *core)
+{
+       struct msg_header header;
+       struct vpu_ctx *ctx = NULL;
+       int ret;
+
+       ret = rpc_get_msg_header(&core->shared_mem, &header);
+       if (ret)
+               return ret;
+
+       if (header.idx >= ARRAY_SIZE(core->ctx)) {
+               vpu_err("msg idx(%d) is out of range\n", header.idx);
+               return -EINVAL;
+       }
+
+       mutex_lock(&core->vdev->dev_mutex);
+       ctx = get_ctx_by_index(core, header.idx);
+       if (ctx != NULL) {
+               process_ctx_msg(ctx, &header);
+               queue_work(ctx->instance_wq, &ctx->instance_work);
+       } else {
+               vpu_err("msg[%d] of ctx[%d] is dropped\n",
+                               header.msgid, header.idx);
+               rpc_read_msg_array(&core->shared_mem, NULL, header.msgnum);
+       }
+       mutex_unlock(&core->vdev->dev_mutex);
+
+       return 0;
+}
+
+extern u_int32 rpc_MediaIPFW_Video_message_check_encoder(struct shared_addr *This);
+static void vpu_enc_msg_run_work(struct work_struct *work)
+{
+       struct core_device *dev = container_of(work, struct core_device, msg_work);
+       struct shared_addr *This = &dev->shared_mem;
+
+       while (rpc_MediaIPFW_Video_message_check_encoder(This) == API_MSG_AVAILABLE)
+               process_msg(dev);
+
+       if (rpc_MediaIPFW_Video_message_check_encoder(This) == API_MSG_BUFFER_ERROR)
+               vpu_err("too many messages to handle\n");
+}
+
+static void vpu_enc_msg_instance_work(struct work_struct *work)
+{
+       struct vpu_ctx *ctx = container_of(work, struct vpu_ctx, instance_work);
+       struct vpu_event_msg *msg;
+
+       while (1) {
+               msg = pop_event_msg(ctx);
+               if (!msg)
+                       break;
+               if (msg->ext_data)
+                       vpu_enc_event_handler(ctx, msg->msgid, msg->ext_data);
+               else
+                       vpu_enc_event_handler(ctx, msg->msgid, msg->data);
+
+               put_idle_msg(ctx, msg);
+       }
+}
+
+static int vpu_queue_setup(struct vb2_queue *vq,
+               unsigned int *buf_count,
+               unsigned int *plane_count,
+               unsigned int psize[],
+               struct device *allocators[])
+{
+       struct queue_data  *This = (struct queue_data *)vq->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, This->desc,
+                       This->ctx->core_dev->id, This->ctx->str_index);
+
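+       /*
+        * OUTPUT (raw YUV) buffers use two planes in the multiplanar case and
+        * pack both plane sizes into one plane otherwise; CAPTURE (compressed)
+        * buffers always use a single plane.
+        */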
+       if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+               if (vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+                       *plane_count = 2;
+                       psize[0] = This->sizeimage[0];//check alignment
+                       psize[1] = This->sizeimage[1];//check colocated_size
+               } else {
+                       psize[0] = This->sizeimage[0] + This->sizeimage[1];
+                       *plane_count = 1;
+               }
+               if (*buf_count < MIN_BUFFER_COUNT)
+                       *buf_count = MIN_BUFFER_COUNT;
+       } else {
+               *plane_count = 1;
+               psize[0] = This->sizeimage[0];//check alignment
+       }
+
+       if (*buf_count > VPU_MAX_BUFFER)
+               *buf_count = VPU_MAX_BUFFER;
+       if (*buf_count < 1)
+               *buf_count = 1;
+
+       return 0;
+}
+
+static int vpu_buf_prepare(struct vb2_buffer *vb)
+{
+       struct vb2_queue *vq = vb->vb2_queue;
+       struct queue_data *q_data = (struct queue_data *)vq->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       q_data->ctx->core_dev->id, q_data->ctx->str_index);
+
+       return 0;
+}
+
+
+static int vpu_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+       struct queue_data *q_data = (struct queue_data *)q->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       q_data->ctx->core_dev->id, q_data->ctx->str_index);
+
+       return 0;
+}
+
+static void vpu_stop_streaming(struct vb2_queue *q)
+{
+       struct queue_data *q_data = (struct queue_data *)q->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       q_data->ctx->core_dev->id, q_data->ctx->str_index);
+       clear_queue(q_data);
+}
+
+static void vpu_buf_queue(struct vb2_buffer *vb)
+{
+       struct vb2_queue    *vq = vb->vb2_queue;
+       struct queue_data   *This = (struct queue_data *)vq->drv_priv;
+       struct vb2_data_req *data_req;
+       struct vpu_ctx *ctx = This->ctx;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, This->desc,
+                       This->ctx->core_dev->id, This->ctx->str_index);
+
+       data_req = &This->vb2_reqs[vb->index];
+       data_req->vb2_buf = vb;
+       if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
+               fill_ctx_seq(ctx, data_req);
+               fill_vb_sequence(vb, data_req->sequence);
+       }
+       list_add_tail(&data_req->list, &This->drv_q);
+}
+
+static bool is_enc_dma_buf(struct vb2_buffer *vb)
+{
+       struct vb2_queue *vq = vb->vb2_queue;
+
+       if (vb->memory != V4L2_MEMORY_MMAP)
+               return false;
+
+       if (vq->mem_ops != &vb2_dma_contig_memops)
+               return false;
+
+       return true;
+}
+
+static int vpu_enc_buf_init(struct vb2_buffer *vb)
+{
+       struct vb2_queue *vq = vb->vb2_queue;
+       struct queue_data *queue = vq->drv_priv;
+       struct vpu_ctx *ctx = queue->ctx;
+       int i;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, queue->desc,
+                       ctx->core_dev->id, ctx->str_index);
+       if (!is_enc_dma_buf(vb))
+               return 0;
+
+       for (i = 0; i < vb->num_planes; i++)
+               vpu_enc_add_dma_size(get_vpu_ctx_attr(ctx),
+                                       vb->planes[i].length);
+
+       return 0;
+}
+
+static void vpu_enc_buf_cleanup(struct vb2_buffer *vb)
+{
+       struct vb2_queue *vq = vb->vb2_queue;
+       struct queue_data *queue = vq->drv_priv;
+       struct vpu_ctx *ctx = queue->ctx;
+       int i;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, queue->desc,
+                       ctx->core_dev->id, ctx->str_index);
+
+       if (!is_enc_dma_buf(vb))
+               return;
+
+       for (i = 0; i < vb->num_planes; i++)
+               vpu_enc_sub_dma_size(get_vpu_ctx_attr(ctx),
+                                       vb->planes[i].length);
+}
+
+static void vpu_prepare(struct vb2_queue *q)
+{
+       struct queue_data *q_data = (struct queue_data *)q->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       q_data->ctx->core_dev->id, q_data->ctx->str_index);
+}
+
+static void vpu_finish(struct vb2_queue *q)
+{
+       struct queue_data *q_data = (struct queue_data *)q->drv_priv;
+
+       vpu_dbg(LVL_BUF, "%s(), %s, (%d, %d)\n", __func__, q_data->desc,
+                       q_data->ctx->core_dev->id, q_data->ctx->str_index);
+}
+
+static struct vb2_ops vpu_enc_v4l2_qops = {
+       .queue_setup        = vpu_queue_setup,
+       .buf_init           = vpu_enc_buf_init,
+       .buf_cleanup        = vpu_enc_buf_cleanup,
+       .wait_prepare       = vpu_prepare,
+       .wait_finish        = vpu_finish,
+       .buf_prepare        = vpu_buf_prepare,
+       .start_streaming    = vpu_start_streaming,
+       .stop_streaming     = vpu_stop_streaming,
+       .buf_queue          = vpu_buf_queue,
+};
+
+static void init_vb2_queue(struct queue_data *This, unsigned int type,
+                               struct vpu_ctx *ctx,
+                               const struct vb2_mem_ops *mem_ops,
+                               gfp_t gfp_flags)
+{
+       struct vb2_queue  *vb2_q = &This->vb2_q;
+       int ret;
+
+       vpu_log_func();
+
+       // initialize driver queue
+       INIT_LIST_HEAD(&This->drv_q);
+       INIT_LIST_HEAD(&This->frame_q);
+       INIT_LIST_HEAD(&This->frame_idle);
+       atomic64_set(&This->frame_count, 0);
+       // initialize vb2 queue
+       vb2_q->type = type;
+       vb2_q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+       vb2_q->gfp_flags = gfp_flags;
+       vb2_q->ops = &vpu_enc_v4l2_qops;
+       vb2_q->drv_priv = This;
+       if (mem_ops)
+               vb2_q->mem_ops = mem_ops;
+       else
+               vb2_q->mem_ops = &vb2_dma_contig_memops;
+       vb2_q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+       vb2_q->dev = &ctx->dev->plat_dev->dev;
+       ret = vb2_queue_init(vb2_q);
+       if (ret)
+               vpu_err("%s vb2_queue_init() failed (%d)!\n", __func__, ret);
+       else
+               This->vb2_q_inited = true;
+}
+
+static void vpu_enc_init_output_queue(struct vpu_ctx *ctx,
+                                       struct queue_data *q)
+{
+       WARN_ON(!ctx);
+       WARN_ON(!q);
+
+       vpu_log_func();
+
+       init_vb2_queue(q,
+                       V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
+                       ctx,
+                       &vb2_dma_contig_memops,
+                       GFP_DMA32);
+       q->type = V4L2_SRC;
+       sema_init(&q->drv_q_lock, 1);
+       q->ctx = ctx;
+
+       q->supported_fmts = formats_yuv_enc;
+       q->fmt_count = ARRAY_SIZE(formats_yuv_enc);
+       q->current_fmt = &formats_yuv_enc[0];
+
+       q->width = VPU_ENC_WIDTH_DEFAULT;
+       q->height = VPU_ENC_HEIGHT_DEFAULT;
+       q->rect.left = 0;
+       q->rect.top = 0;
+       q->rect.width = VPU_ENC_WIDTH_DEFAULT;
+       q->rect.height = VPU_ENC_HEIGHT_DEFAULT;
+       scnprintf(q->desc, sizeof(q->desc), "OUTPUT");
+}
+
+static void vpu_enc_init_capture_queue(struct vpu_ctx *ctx,
+                                       struct queue_data *q)
+{
+       WARN_ON(!ctx);
+       WARN_ON(!q);
+
+       vpu_log_func();
+
+       init_vb2_queue(q,
+                       V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
+                       ctx,
+                       &vb2_vmalloc_memops, 0);
+       q->type = V4L2_DST;
+       sema_init(&q->drv_q_lock, 1);
+       q->ctx = ctx;
+
+       q->supported_fmts = formats_compressed_enc;
+       q->fmt_count = ARRAY_SIZE(formats_compressed_enc);
+       q->current_fmt = &formats_compressed_enc[0];
+
+       q->width = VPU_ENC_WIDTH_DEFAULT;
+       q->height = VPU_ENC_HEIGHT_DEFAULT;
+       scnprintf(q->desc, sizeof(q->desc), "CAPTURE");
+}
+
+static void vpu_enc_init_queue_data(struct vpu_ctx *ctx)
+{
+       vpu_enc_init_output_queue(ctx, &ctx->q_data[V4L2_SRC]);
+       vpu_enc_init_capture_queue(ctx, &ctx->q_data[V4L2_DST]);
+}
+
+static void vpu_enc_release_queue_data(struct vpu_ctx *ctx)
+{
+       vpu_enc_queue_release(&ctx->q_data[V4L2_SRC]);
+       vpu_enc_queue_release(&ctx->q_data[V4L2_DST]);
+}
+
+static void vpu_ctx_power_on(struct vpu_ctx *ctx)
+{
+       if (!ctx || !ctx->core_dev)
+               return;
+
+       if (ctx->power_status)
+               return;
+       pm_runtime_get_sync(ctx->core_dev->generic_dev);
+       ctx->power_status = true;
+}
+
+static void vpu_ctx_power_off(struct vpu_ctx *ctx)
+{
+       if (!ctx || !ctx->core_dev)
+               return;
+
+       if (!ctx->power_status)
+               return;
+       pm_runtime_put_sync(ctx->core_dev->generic_dev);
+       ctx->power_status = false;
+}
+
+static int set_vpu_fw_addr(struct vpu_dev *dev, struct core_device *core_dev)
+{
+       off_t reg_fw_base;
+
+       if (!dev || !core_dev)
+               return -EINVAL;
+
+       MU_Init(core_dev->mu_base_virtaddr);
+       MU_EnableRxFullInt(core_dev->mu_base_virtaddr, 0);
+
+       reg_fw_base = core_dev->reg_csr_base;
+       write_vpu_reg(dev, core_dev->m0_p_fw_space_phy, reg_fw_base);
+       write_vpu_reg(dev, 0x0, reg_fw_base + 4);
+
+       return 0;
+}
+
+static void cleanup_firmware_memory(struct core_device *core_dev)
+{
+       memset_io(core_dev->m0_p_fw_space_vir, 0, core_dev->fw_buf_size);
+}
+
+static int vpu_firmware_download(struct vpu_dev *This, u_int32 core_id)
+{
+       const struct firmware *m0_pfw = NULL;
+       const u8 *image;
+       unsigned int FW_Size = 0;
+       int ret = 0;
+       struct core_device *core_dev = &This->core_dev[core_id];
+       char *p = This->core_dev[core_id].m0_p_fw_space_vir;
+
+       vpu_log_func();
+
+       ret = request_firmware(&m0_pfw, M0FW_FILENAME, This->generic_dev);
+       if (ret) {
+               vpu_err("%s() request fw %s failed(%d)\n",
+                       __func__, M0FW_FILENAME, ret);
+
+               return ret;
+       }
+       vpu_dbg(LVL_DEBUG, "%s() request fw %s got size(%ld)\n",
+                       __func__, M0FW_FILENAME, m0_pfw->size);
+
+       image = m0_pfw->data;
+       FW_Size = min_t(u32, m0_pfw->size, This->core_dev[core_id].fw_buf_size);
+       This->core_dev[core_id].fw_actual_size = FW_Size;
+
+       cleanup_firmware_memory(core_dev);
+       memcpy(core_dev->m0_p_fw_space_vir, image, FW_Size);
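+       /*
+        * Patch the platform type and core index into fixed offsets of the
+        * loaded firmware image (these bytes are reported back as "fw info"
+        * in show_core_info()).
+        */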
+       p[16] = This->plat_type;
+       p[17] = core_id + 1;
+       p[18] = 1;
+       set_vpu_fw_addr(This, &This->core_dev[core_id]);
+
+       release_firmware(m0_pfw);
+       m0_pfw = NULL;
+
+       return ret;
+}
+
+static int download_vpu_firmware(struct vpu_dev *dev,
+                               struct core_device *core_dev)
+{
+       int ret = 0;
+
+       if (!dev || !core_dev)
+               return -EINVAL;
+
+       if (core_dev->fw_is_ready)
+               return 0;
+
+       vpu_dbg(LVL_INFO, "download firmware for core[%d]\n", core_dev->id);
+       init_completion(&core_dev->start_cmp);
+       ret = vpu_firmware_download(dev, core_dev->id);
+       if (ret) {
+               vpu_err("error: vpu_firmware_download fail\n");
+               goto exit;
+       }
+       wait_for_start_done(core_dev, 0);
+       if (!core_dev->firmware_started) {
+               vpu_err("core[%d] start firmware failed\n", core_dev->id);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       set_core_fw_status(core_dev, true);
+       clear_core_hang(core_dev);
+exit:
+       return ret;
+}
+
+static bool is_valid_ctx(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return false;
+       if (!ctx->dev || !ctx->core_dev)
+               return false;
+       if (ctx->str_index >= ARRAY_SIZE(ctx->core_dev->ctx))
+               return false;
+
+       return true;
+}
+
+static void free_instance(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       if (is_valid_ctx(ctx))
+               ctx->core_dev->ctx[ctx->str_index] = NULL;
+       VPU_SAFE_RELEASE(ctx, kfree);
+}
+
+static u32 count_free_core_slot(struct core_device *core)
+{
+       u32 count = 0;
+       int i;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (!core->ctx[i])
+                       count++;
+       }
+
+       return count;
+}
+
+static struct core_device *find_proper_core(struct vpu_dev *dev)
+{
+       struct core_device *core = NULL;
+       u32 maximum = 0;
+       u32 count;
+       int i;
+       int ret;
+
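+       /*
+        * Prefer a core whose instance slots are all free; otherwise fall
+        * back to the core with the most free slots.
+        */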
+       for (i = 0; i < dev->core_num; i++) {
+               struct core_device *core_dev = &dev->core_dev[i];
+
+               ret = process_core_hang(core_dev);
+               if (ret)
+                       continue;
+
+               ret = download_vpu_firmware(dev, core_dev);
+               if (ret)
+                       continue;
+
+               if (core_dev->supported_instance_count == 0)
+                       continue;
+
+               count = count_free_core_slot(core_dev);
+               if (count == core_dev->supported_instance_count)
+                       return core_dev;
+
+               if (maximum < count) {
+                       core = core_dev;
+                       maximum = count;
+               }
+       }
+
+       return core;
+}
+
+static int request_instance(struct core_device *core, struct vpu_ctx *ctx)
+{
+       int found = 0;
+       int idx;
+
+       if (!core || !ctx)
+               return -EINVAL;
+
+       for (idx = 0; idx < core->supported_instance_count; idx++) {
+               if (!core->ctx[idx]) {
+                       found = 1;
+                       ctx->core_dev = core;
+                       ctx->str_index = idx;
+                       ctx->dev = core->vdev;
+                       break;
+               }
+       }
+
+       if (!found) {
+               vpu_err("can't request any instance\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+static int construct_vpu_ctx(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return -EINVAL;
+
+       ctx->ctrl_inited = false;
+       mutex_init(&ctx->instance_mutex);
+       ctx->ctx_released = false;
+
+       return 0;
+}
+
+static struct vpu_ctx *create_and_request_instance(struct vpu_dev *dev)
+{
+       struct core_device *core = NULL;
+       struct vpu_ctx *ctx = NULL;
+       int ret;
+
+       if (!dev)
+               return NULL;
+
+       core = find_proper_core(dev);
+       if (!core)
+               return NULL;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       ret = request_instance(core, ctx);
+       if (ret < 0) {
+               VPU_SAFE_RELEASE(ctx, kfree);
+               return NULL;
+       }
+
+       construct_vpu_ctx(ctx);
+       vpu_ctx_power_on(ctx);
+       vpu_dbg(LVL_INFO, "request encoder instance : %d.%d\n",
+                       ctx->core_dev->id, ctx->str_index);
+
+       return ctx;
+}
+
+static int init_vpu_ctx_fh(struct vpu_ctx *ctx, struct vpu_dev *dev)
+{
+       if (!ctx || !dev)
+               return -EINVAL;
+
+       mutex_lock(&ctx->instance_mutex);
+
+       v4l2_fh_init(&ctx->fh, dev->pvpu_encoder_dev);
+       v4l2_fh_add(&ctx->fh);
+       ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+       clear_bit(VPU_ENC_STATUS_CLOSED, &ctx->status);
+
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static void uninit_vpu_ctx_fh(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       mutex_lock(&ctx->instance_mutex);
+
+       set_bit(VPU_ENC_STATUS_CLOSED, &ctx->status);
+       v4l2_fh_del(&ctx->fh);
+       v4l2_fh_exit(&ctx->fh);
+
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+static void cancel_vpu_ctx(struct vpu_ctx *ctx)
+{
+       cancel_work_sync(&ctx->instance_work);
+       cleanup_ctx_msg_queue(ctx);
+}
+
+static void uninit_vpu_ctx(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return;
+
+       clear_bit(VPU_ENC_STATUS_INITIALIZED, &ctx->status);
+       cancel_vpu_ctx(ctx);
+       if (ctx->instance_wq) {
+               destroy_workqueue(ctx->instance_wq);
+               ctx->instance_wq = NULL;
+       }
+       mutex_lock(&ctx->instance_mutex);
+       vpu_enc_free_stream(ctx);
+
+       ctx->ctx_released = true;
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+static int init_vpu_ctx(struct vpu_ctx *ctx)
+{
+       INIT_WORK(&ctx->instance_work, vpu_enc_msg_instance_work);
+       ctx->instance_wq = alloc_workqueue("vpu_instance",
+                               WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (!ctx->instance_wq) {
+               vpu_err("error: unable to alloc workqueue for ctx\n");
+               return -ENOMEM;
+       }
+
+       init_ctx_msg_queue(ctx);
+
+       vpu_enc_init_queue_data(ctx);
+       init_completion(&ctx->stop_cmp);
+
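+       /*
+        * Publish the context to the core only after it is fully initialized;
+        * the message work path looks contexts up through core->ctx[] and
+        * requires the INITIALIZED bit to be set.
+        */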
+       set_bit(VPU_ENC_STATUS_INITIALIZED, &ctx->status);
+       ctx->core_dev->ctx[ctx->str_index] = ctx;
+
+       return 0;
+}
+
+static int show_encoder_param(struct vpu_attr *attr,
+               pMEDIAIP_ENC_PARAM param, char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num,
+                       "encoder param:[setting/take effect]\n");
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Codec Mode",
+                       attr->param.eCodecMode, param->eCodecMode);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Profile",
+                       attr->param.eProfile, param->eProfile);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Level",
+                       attr->param.uLevel, param->uLevel);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Frame Rate",
+                       attr->param.uFrameRate, param->uFrameRate);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Stride",
+                       attr->param.uSrcStride, param->uSrcStride);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Width",
+                       attr->param.uSrcWidth, param->uSrcWidth);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Height",
+                       attr->param.uSrcHeight, param->uSrcHeight);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Offset x",
+                       attr->param.uSrcOffset_x, param->uSrcOffset_x);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Offset y",
+                       attr->param.uSrcOffset_y, param->uSrcOffset_y);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Crop Width",
+                       attr->param.uSrcCropWidth, param->uSrcCropWidth);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Source Crop Height",
+                       attr->param.uSrcCropHeight,
+                       param->uSrcCropHeight);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Out Width",
+                       attr->param.uOutWidth, param->uOutWidth);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Out Height",
+                       attr->param.uOutHeight, param->uOutHeight);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "I Frame Interval",
+                       attr->param.uIFrameInterval,
+                       param->uIFrameInterval);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Bframes",
+                       attr->param.uGopBLength, param->uGopBLength);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Low Latency Mode",
+                       attr->param.uLowLatencyMode,
+                       param->uLowLatencyMode);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Bitrate Mode",
+                       attr->param.eBitRateMode, param->eBitRateMode);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Target Bitrate",
+                       attr->param.uTargetBitrate,
+                       param->uTargetBitrate);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Min Bitrate",
+                       attr->param.uMinBitRate, param->uMinBitRate);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "Max Bitrate",
+                       attr->param.uMaxBitRate, param->uMaxBitRate);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-18s:%10d;%10d\n", "QP",
+                       attr->param.uInitSliceQP,
+                       param->uInitSliceQP);
+
+       return num;
+}
+
+static int show_queue_buffer_info(struct queue_data *queue, char *buf, u32 size)
+{
+       int i;
+       int num = 0;
+
+       for (i = 0; i < queue->vb2_q.num_buffers; i++) {
+               struct vb2_buffer *vb = queue->vb2_q.bufs[i];
+
+               num += scnprintf(buf + num, size - num, " %d", vb->state);
+       }
+
+       return num;
+}
+
+static int show_cmd_event(struct vpu_statistic *statistic, int i,
+                               char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num, "\t(%2d) ", i);
+
+       if (i <= GTB_ENC_CMD_RESERVED)
+               num += scnprintf(buf + num, size - num, "%-28s:%12ld;",
+                               get_cmd_str(i), statistic->cmd[i]);
+       else
+               num += scnprintf(buf + num, size - num, "%-28s:%12s;",
+                               "", "");
+
+       num += scnprintf(buf + num, size - num, "    ");
+       if (i <= VID_API_ENC_EVENT_RESERVED)
+               num += scnprintf(buf + num, size - num, "%-34s:%12ld;",
+                               get_event_str(i), statistic->event[i]);
+
+       num += scnprintf(buf + num, size - num, "\n");
+
+       return num;
+}
+
+static int show_cmd_event_infos(struct vpu_statistic *statistic,
+               char *buf, u32 size)
+{
+       int num = 0;
+       int i;
+       int count;
+
+       num += scnprintf(buf + num, size - num, "command/event:\n");
+
+       count = max((int)GTB_ENC_CMD_RESERVED, (int)VID_API_ENC_EVENT_RESERVED);
+       for (i = 0; i <= count; i++)
+               num += show_cmd_event(statistic, i, buf + num, size - num);
+
+       num += scnprintf(buf + num, size - num, "current status:\n");
+       num += scnprintf(buf + num, size - num,
+                       "\t%-10s:%36s;%10ld.%06ld\n", "command",
+                       get_cmd_str(statistic->current_cmd),
+                       statistic->ts_cmd.tv_sec,
+                       statistic->ts_cmd.tv_nsec / 1000);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-10s:%36s;%10ld.%06ld\n", "event",
+                       get_event_str(statistic->current_event),
+                       statistic->ts_event.tv_sec,
+                       statistic->ts_event.tv_nsec / 1000);
+
+       return num;
+}
+
+static int show_single_fps_info(struct vpu_fps_sts *fps, char *buf, u32 size)
+{
+       const u32 COEF = VPU_FPS_COEF;
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num, "%3ld.", fps->fps / COEF);
+       num += scnprintf(buf + num, size - num, "%02ld", fps->fps % COEF);
+       if (fps->thd)
+               num += scnprintf(buf + num, size - num, "(%ds)", fps->thd);
+       else
+               num += scnprintf(buf + num, size - num, "(avg)");
+
+       return num;
+}
+
+static int show_fps_info(struct vpu_fps_sts *fps, int count,
+                       char *buf, u32 size)
+{
+       int i;
+       int num = 0;
+
+       for (i = 0; i < count; i++) {
+               if (i > 0)
+                       num += scnprintf(buf + num, size - num, "  ");
+               num += show_single_fps_info(&fps[i], buf + num, size - num);
+       }
+
+       return num;
+}
+
+static int show_frame_sts(struct vpu_statistic *statistic, char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num,
+                       "frame count:\n");
+       num += scnprintf(buf + num, size - num, "\t%-24s:%ld\n",
+                       "dbuf input yuv count", statistic->yuv_count);
+       num += scnprintf(buf + num, size - num, "\t%-24s:%ld\n",
+                       "encode frame count", statistic->encoded_count);
+       num += scnprintf(buf + num, size - num, "\t%-24s:%ld\n",
+                       "dqbuf output h264 count", statistic->h264_count);
+
+       num += scnprintf(buf + num, size - num, "\t%-24s:", "actual fps");
+       num += show_fps_info(statistic->fps, ARRAY_SIZE(statistic->fps),
+                               buf + num, size - num);
+       num += scnprintf(buf + num, size - num, "\n");
+       num += scnprintf(buf + num, size - num, "\t%-24s:%ld\n",
+                       "timestamp overwrite", statistic->timestamp_overwrite);
+
+       return num;
+}
+
+static int show_strip_info(struct vpu_statistic *statistic, char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num,
+                       "strip data frame count:\n");
+       num += scnprintf(buf + num, size - num,
+                       "\t fw      :%16ld (max : %ld; total : %ld)\n",
+                       statistic->strip_sts.fw.count,
+                       statistic->strip_sts.fw.max,
+                       statistic->strip_sts.fw.total);
+       num += scnprintf(buf + num, size - num,
+                       "\t begin   :%16ld (max : %ld; total : %ld)\n",
+                       statistic->strip_sts.begin.count,
+                       statistic->strip_sts.begin.max,
+                       statistic->strip_sts.begin.total);
+       num += scnprintf(buf + num, size - num,
+                       "\t eos     :%16ld (max : %ld; total : %ld)\n",
+                       statistic->strip_sts.eos.count,
+                       statistic->strip_sts.eos.max,
+                       statistic->strip_sts.eos.total);
+
+       return num;
+}
+
+static int show_v4l2_buf_status(struct vpu_ctx *ctx, char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num, "V4L2 Buffer Status: ");
+       num += scnprintf(buf + num, size - num, "(");
+       num += scnprintf(buf + num, size - num,
+                       " %d:dequeued,", VB2_BUF_STATE_DEQUEUED);
+       num += scnprintf(buf + num, size - num,
+                       " %d:preparing,", VB2_BUF_STATE_PREPARING);
+       num += scnprintf(buf + num, size - num,
+                       " %d:prepared,", VB2_BUF_STATE_PREPARED);
+       num += scnprintf(buf + num, size - num,
+                       " %d:queued,", VB2_BUF_STATE_QUEUED);
+       num += scnprintf(buf + num, size - num,
+                       " %d:requeueing,", VB2_BUF_STATE_REQUEUEING);
+       num += scnprintf(buf + num, size - num,
+                       " %d:active,", VB2_BUF_STATE_ACTIVE);
+       num += scnprintf(buf + num, size - num,
+                       " %d:done,", VB2_BUF_STATE_DONE);
+       num += scnprintf(buf + num, size - num,
+                       " %d:error", VB2_BUF_STATE_ERROR);
+       num += scnprintf(buf + num, size - num, ")\n");
+       num += scnprintf(buf + num, size - num, "\tOUTPUT:");
+       num += show_queue_buffer_info(&ctx->q_data[V4L2_SRC],
+                                       buf + num,
+                                       size - num);
+       num += scnprintf(buf + num, size - num, "    CAPTURE:");
+       num += show_queue_buffer_info(&ctx->q_data[V4L2_DST],
+                                       buf + num,
+                                       size - num);
+       num += scnprintf(buf + num, size - num, "\n");
+
+       return num;
+}
+
+static int show_instance_status(struct vpu_ctx *ctx, char *buf, u32 size)
+{
+       int num = 0;
+
+       num += scnprintf(buf + num, size - num, "instance status:\n");
+       num += scnprintf(buf + num, size - num,
+                       "\t%-13s:0x%lx\n", "status", ctx->status);
+       num += scnprintf(buf + num, size - num,
+                       "\t%-13s:%d\n", "frozen count", ctx->frozen_count);
+
+       return num;
+}
+
+static int show_instance_others(struct vpu_attr *attr, char *buf, u32 size)
+{
+       int num = 0;
+       struct vpu_ctx *ctx = NULL;
+       struct vpu_dev *vpudev = attr->core->vdev;
+
+       num += scnprintf(buf + num, size - num, "others:\n");
+       if (attr->ts_start[V4L2_SRC] && attr->ts_start[V4L2_DST]) {
+               unsigned long latency;
+
+               latency = attr->ts_start[V4L2_DST] - attr->ts_start[V4L2_SRC];
+               num += scnprintf(buf + num, size - num,
+                               "\tlatency(ms)               :%ld\n", latency);
+       }
+
+       num += scnprintf(buf + num, size - num,
+                       "\ttotal dma size            :%ld\n",
+                       atomic64_read(&attr->total_dma_size));
+       num += scnprintf(buf + num, size - num,
+                       "\ttotal event msg obj count :%ld\n",
+                       attr->msg_count);
+       num += scnprintf(buf + num, size - num,
+                       "\ttotal msg ext data count  :%lld\n",
+                       get_total_ext_data_number());
+
+       mutex_lock(&vpudev->dev_mutex);
+       ctx = get_vpu_attr_ctx(attr);
+       if (ctx) {
+               num += scnprintf(buf + num, size - num,
+                       "\ttotal frame obj count     :%ld\n",
+                       atomic64_read(&ctx->q_data[V4L2_DST].frame_count));
+
+               if (test_bit(VPU_ENC_STATUS_HANG, &ctx->status))
+                       num += scnprintf(buf + num, size - num, "<hang>\n");
+       } else {
+               num += scnprintf(buf + num, size - num,
+                       "<instance has been released>\n");
+       }
+       mutex_unlock(&vpudev->dev_mutex);
+
+       return num;
+}
+
+static ssize_t show_instance_info(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct vpu_attr *vpu_attr;
+       struct vpu_dev *vpudev;
+       struct vpu_statistic *statistic;
+       struct vpu_ctx *ctx;
+       pMEDIAIP_ENC_PARAM param;
+       int num = 0;
+
+       vpu_attr = container_of(attr, struct vpu_attr,  dev_attr);
+       vpudev = vpu_attr->core->vdev;
+
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "pid: %d; tgid: %d\n", vpu_attr->pid, vpu_attr->tgid);
+
+       param = rpc_get_enc_param(&vpu_attr->core->shared_mem, vpu_attr->index);
+       num += show_encoder_param(vpu_attr, param, buf + num, PAGE_SIZE - num);
+
+       statistic = &vpu_attr->statistic;
+       num += show_cmd_event_infos(statistic, buf + num, PAGE_SIZE - num);
+       num += show_frame_sts(statistic, buf + num, PAGE_SIZE - num);
+       num += show_strip_info(statistic, buf + num, PAGE_SIZE - num);
+
+       mutex_lock(&vpudev->dev_mutex);
+       ctx = get_vpu_attr_ctx(vpu_attr);
+       if (ctx) {
+               num += show_v4l2_buf_status(ctx, buf + num, PAGE_SIZE - num);
+               num += show_instance_status(ctx, buf + num, PAGE_SIZE - num);
+       }
+       mutex_unlock(&vpudev->dev_mutex);
+
+       num += show_instance_others(vpu_attr, buf + num, PAGE_SIZE - num);
+
+       return num;
+}
+
+static ssize_t show_core_info(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct core_device *core = NULL;
+       char *fw = NULL;
+       int num = 0;
+
+       core = container_of(attr, struct core_device, core_attr);
+       fw = core->m0_p_fw_space_vir;
+
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "core[%d] info:\n", core->id);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "vpu mu id       : %d\n", core->vpu_mu_id);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "irq             : %d\n", core->irq);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reg mu mcu      : 0x%08x 0x%08x\n",
+                       core->reg_base, core->reg_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reg csr         : 0x%08x 0x%08x\n",
+                       core->reg_csr_base, core->reg_csr_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "fw space phy    : 0x%08x\n", core->m0_p_fw_space_phy);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "fw space size   : 0x%08x\n", core->fw_buf_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "fw actual size  : 0x%08x\n", core->fw_actual_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "rpc phy         : 0x%08x\n", core->m0_rpc_phy);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "rpc buf size    : 0x%08x\n", core->rpc_buf_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "rpc actual size : 0x%08x\n", core->rpc_actual_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "print buf phy   : 0x%08x\n",
+                       core->m0_rpc_phy + core->rpc_buf_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "print buf size  : 0x%08x\n", core->print_buf_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "max instance num: %d\n",
+                       core->supported_instance_count);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "fw_is_ready     : %d\n", core->fw_is_ready);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "firmware_started: %d\n", core->firmware_started);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "hang            : %d\n", core->hang);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reset times     : %ld\n", core->reset_times);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "heartbeat       : %02x\n", core->vdev->heartbeat);
+       if (core->fw_is_ready) {
+               pENC_RPC_HOST_IFACE iface = core->shared_mem.pSharedInterface;
+
+               num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "firmware version: %d.%d.%d\n",
+                       (iface->FWVersion & 0x00ff0000) >> 16,
+                       (iface->FWVersion & 0x0000ff00) >> 8,
+                       iface->FWVersion & 0x000000ff);
+               num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "fw info         : 0x%02x 0x%02x\n", fw[16], fw[17]);
+       }
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "driver version  : %s\n", VPU_ENC_DRIVER_VERSION);
+
+       return num;
+}
+
+static int show_vb2_memory(struct vb2_buffer *vb, char *buf, u32 size)
+{
+       int num = 0;
+       int i;
+
+       for (i = 0; i < vb->num_planes; i++) {
+               num += scnprintf(buf + num, size - num, "0x%8x 0x%x",
+                               get_vb2_plane_phy_addr(vb, i),
+                               vb->planes[i].length);
+               if (i == vb->num_planes - 1)
+                       num += scnprintf(buf + num, size - num, "\n");
+               else
+                       num += scnprintf(buf + num, size - num, "; ");
+       }
+
+       return num;
+}
+
+static int show_queue_memory(struct queue_data *queue, char *buf, u32 size,
+                               char *prefix)
+{
+       int num = 0;
+       int i;
+
+       num += scnprintf(buf + num, size - num, "%s%4s v4l2buf  :\n", prefix,
+                       queue->type == V4L2_SRC ? "YUV" : "H264");
+
+       for (i = 0; i < queue->vb2_q.num_buffers; i++) {
+               struct vb2_buffer *vb = queue->vb2_q.bufs[i];
+
+               num += scnprintf(buf + num, size - num, "%s%18s", prefix, "");
+               num += show_vb2_memory(vb, buf + num, size - num);
+       }
+
+       return num;
+}
+
+static int show_ctx_memory_details(struct vpu_ctx *ctx, char *buf, u32 size,
+                               char *prefix)
+{
+       int num = 0;
+       int i;
+
+       if (!ctx)
+               return 0;
+
+       num += scnprintf(buf + num, size - num, "%smemory details:\n", prefix);
+       num += scnprintf(buf + num, size - num, "%sencFrames    :\n", prefix);
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES; i++) {
+               num += scnprintf(buf + num, size - num, "%s%14s", prefix, "");
+               num += scnprintf(buf + num, size - num, "[%d] 0x%8llx 0x%x\n",
+                               i,
+                               ctx->encFrame[i].phy_addr,
+                               ctx->encFrame[i].size);
+       }
+
+       num += scnprintf(buf + num, size - num, "%srefFrames    :\n", prefix);
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES; i++) {
+               num += scnprintf(buf + num, size - num, "%s%14s", prefix, "");
+               num += scnprintf(buf + num, size - num, "[%d] 0x%8llx 0x%x\n",
+                               i,
+                               ctx->refFrame[i].phy_addr,
+                               ctx->refFrame[i].size);
+       }
+
+       num += scnprintf(buf + num, size - num, "%sactFrames    :\n", prefix);
+       num += scnprintf(buf + num, size - num, "%s%18s", prefix, "");
+       num += scnprintf(buf + num, size - num, "0x%8llx 0x%x\n",
+                       ctx->actFrame.phy_addr, ctx->actFrame.size);
+
+       num += scnprintf(buf + num, size - num, "%sencoderStream:\n", prefix);
+       num += scnprintf(buf + num, size - num, "%s%18s", prefix, "");
+       num += scnprintf(buf + num, size - num, "0x%8llx 0x%x\n",
+                       ctx->encoder_stream.phy_addr, ctx->encoder_stream.size);
+
+       for (i = 0; i < ARRAY_SIZE(ctx->q_data); i++) {
+               struct queue_data *queue = &ctx->q_data[i];
+
+               if (queue->vb2_q.memory != V4L2_MEMORY_MMAP)
+                       continue;
+               if (queue->vb2_q.mem_ops != &vb2_dma_contig_memops)
+                       continue;
+
+               num += show_queue_memory(queue, buf + num, size - num, prefix);
+       }
+
+       return num;
+}
+
+static ssize_t show_memory_info(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct vpu_dev *vdev = dev_get_drvdata(dev);
+       unsigned long total_dma_size = 0;
+       int num = 0;
+       int i;
+       int j;
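+       /* show_detail_index: core id in bits 15..8, ctx id in bits 7..0 */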
+       int core_id = (show_detail_index >> 8) & 0xff;
+       int ctx_id = show_detail_index & 0xff;
+
+       num += scnprintf(buf + num, PAGE_SIZE - num, "dma memory usage:\n");
+       for (i = 0; i < vdev->core_num; i++) {
+               struct core_device *core = &vdev->core_dev[i];
+
+               num += scnprintf(buf + num, PAGE_SIZE - num, "core[%d]\n", i);
+
+               for (j = 0; j < ARRAY_SIZE(core->attr); j++) {
+                       struct vpu_attr *attr = &core->attr[j];
+                       unsigned long size;
+
+                       size = atomic64_read(&attr->total_dma_size);
+                       total_dma_size += size;
+                       num += scnprintf(buf + num, PAGE_SIZE - num,
+                                       "\t[%d] : %ld\n", j, size);
+                       if (core_id != i || ctx_id != j)
+                               continue;
+                       mutex_lock(&vdev->dev_mutex);
+                       num += show_ctx_memory_details(core->ctx[j],
+                                                       buf + num,
+                                                       PAGE_SIZE - num,
+                                                       "\t\t");
+                       mutex_unlock(&vdev->dev_mutex);
+               }
+       }
+
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "total dma             : %ld\n", total_dma_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "total reserved memory : %ld\n",
+                       vdev->reserved_mem.bytesused);
+       show_detail_index = VPU_DETAIL_INDEX_DFT;
+
+       return num;
+}
+
+static ssize_t store_memory_info_index(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       long val = VPU_DETAIL_INDEX_DFT;
+
+       if (!kstrtol(buf, 0, &val))
+               show_detail_index = val;
+
+       return count;
+}
+DEVICE_ATTR(meminfo, 0664, show_memory_info, store_memory_info_index);
+
+static ssize_t show_buffer_info(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct vpu_dev *vdev = dev_get_drvdata(dev);
+       int num = 0;
+       int i;
+       int j;
+
+       mutex_lock(&vdev->dev_mutex);
+       num += scnprintf(buf + num, PAGE_SIZE - num, "vpu encoder buffers:\t");
+       num += scnprintf(buf + num, PAGE_SIZE - num, "(");
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:dequeued,", VB2_BUF_STATE_DEQUEUED);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:preparing,", VB2_BUF_STATE_PREPARING);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:prepared,", VB2_BUF_STATE_PREPARED);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:queued,", VB2_BUF_STATE_QUEUED);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:requeueing,", VB2_BUF_STATE_REQUEUEING);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:active,", VB2_BUF_STATE_ACTIVE);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:done,", VB2_BUF_STATE_DONE);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       " %d:error", VB2_BUF_STATE_ERROR);
+       num += scnprintf(buf + num, PAGE_SIZE - num, ")\n");
+
+       for (i = 0; i < vdev->core_num; i++) {
+               struct core_device *core = &vdev->core_dev[i];
+
+               if (!core->supported_instance_count)
+                       continue;
+
+               num += scnprintf(buf + num, PAGE_SIZE - num, "core[%d]\n", i);
+               for (j = 0; j < core->supported_instance_count; j++) {
+                       struct vpu_ctx *ctx = core->ctx[j];
+
+                       if (!ctx)
+                               continue;
+                       num += scnprintf(buf + num, PAGE_SIZE - num,
+                                       "\t[%d]: ", j);
+                       num += scnprintf(buf + num,
+                                       PAGE_SIZE - num, "OUTPUT:");
+                       num += show_queue_buffer_info(&ctx->q_data[V4L2_SRC],
+                                                       buf + num,
+                                                       PAGE_SIZE - num);
+                       num += scnprintf(buf + num,
+                                       PAGE_SIZE - num, "    CAPTURE:");
+                       num += show_queue_buffer_info(&ctx->q_data[V4L2_DST],
+                                                       buf + num,
+                                                       PAGE_SIZE - num);
+                       num += scnprintf(buf + num, PAGE_SIZE - num, "\n");
+               }
+       }
+       mutex_unlock(&vdev->dev_mutex);
+
+       return num;
+}
+DEVICE_ATTR(buffer, 0444, show_buffer_info, NULL);
+
+static ssize_t show_fpsinfo(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct vpu_dev *vdev = dev_get_drvdata(dev);
+       int num = 0;
+       int i;
+       int j;
+
+       for (i = 0; i < vdev->core_num; i++) {
+               struct core_device *core = &vdev->core_dev[i];
+
+               if (!core->supported_instance_count)
+                       continue;
+
+               num += scnprintf(buf + num, PAGE_SIZE - num, "core[%d]\n", i);
+               for (j = 0; j < core->supported_instance_count; j++) {
+                       struct vpu_attr *vpu_attr = &core->attr[j];
+
+                       if (!vpu_attr->created)
+                               continue;
+                       num += scnprintf(buf + num, PAGE_SIZE - num,
+                                       "\t[%d]", j);
+                       num += scnprintf(buf + num, PAGE_SIZE - num,
+                                       "  %3d(setting)  ",
+                                       vpu_attr->param.uFrameRate);
+                       num += show_fps_info(vpu_attr->statistic.fps,
+                                       ARRAY_SIZE(vpu_attr->statistic.fps),
+                                       buf + num, PAGE_SIZE - num);
+                       num += scnprintf(buf + num, PAGE_SIZE - num, "\n");
+               }
+       }
+
+       return num;
+}
+DEVICE_ATTR(fpsinfo, 0444, show_fpsinfo, NULL);
+
+static ssize_t show_vpuinfo(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct vpu_dev *vdev = dev_get_drvdata(dev);
+       int num = 0;
+
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "core number          : %d\n", vdev->core_num);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "platform type        : %d\n", vdev->plat_type);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reg-vpu              : 0x%08x 0x%08x\n",
+                       vdev->reg_vpu_base, vdev->reg_vpu_size);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reg-rpc-system       : 0x%08x\n",
+                       vdev->reg_rpc_system);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "reserved-memory      : 0x%08lx 0x%08lx\n",
+                       vdev->reserved_mem.phy_addr, vdev->reserved_mem.size);
+       num += scnprintf(buf + num, PAGE_SIZE - num, "supported resolution :");
+       num += scnprintf(buf + num, PAGE_SIZE - num, " %dx%d(min);",
+                       vdev->supported_size.min_width,
+                       vdev->supported_size.min_height);
+       num += scnprintf(buf + num, PAGE_SIZE - num, " %dx%d(step);",
+                       vdev->supported_size.step_width,
+                       vdev->supported_size.step_height);
+       num += scnprintf(buf + num, PAGE_SIZE - num, " %dx%d(max)\n",
+                       vdev->supported_size.max_width,
+                       vdev->supported_size.max_height);
+       num += scnprintf(buf + num, PAGE_SIZE - num,
+                       "supported frame rate : %d(min); %d(step); %d(max)\n",
+                       vdev->supported_fps.min,
+                       vdev->supported_fps.step,
+                       vdev->supported_fps.max);
+
+       return num;
+}
+DEVICE_ATTR(vpuinfo, 0444, show_vpuinfo, NULL);
+
+static void reset_statistic(struct vpu_attr *attr)
+{
+       if (!attr)
+               return;
+
+       memset(&attr->statistic, 0, sizeof(attr->statistic));
+       attr->statistic.current_cmd = GTB_ENC_CMD_NOOP;
+       attr->statistic.current_event = VID_API_EVENT_UNDEFINED;
+}
+
+static int init_vpu_attr_fps_sts(struct vpu_attr *attr)
+{
+       const unsigned int THDS[] = VPU_FPS_STS_THDS;
+       int i;
+
+       for (i = 0; i < VPU_FPS_STS_CNT; i++) {
+               if (i < ARRAY_SIZE(THDS))
+                       attr->statistic.fps[i].thd = THDS[i];
+               else
+                       attr->statistic.fps[i].thd = 0;
+       }
+
+       return 0;
+}
+
+static int enable_fps_sts(struct vpu_attr *attr)
+{
+       int i;
+       struct vpu_statistic *sts = &attr->statistic;
+
+       sts->fps_sts_enable = true;
+
+       for (i = 0; i < VPU_FPS_STS_CNT; i++) {
+               getrawmonotonic(&sts->fps[i].ts);
+               sts->fps[i].frame_number = sts->encoded_count;
+       }
+
+       return 0;
+}
+
+static int disable_fps_sts(struct vpu_attr *attr)
+{
+       attr->statistic.fps_sts_enable = false;
+
+       return 0;
+}
+
+static int init_vpu_attr(struct vpu_attr *attr)
+{
+       if (!attr || !attr->core)
+               return -EINVAL;
+
+       reset_statistic(attr);
+       memset(&attr->param, 0, sizeof(attr->param));
+       attr->pid = current->pid;
+       attr->tgid = current->tgid;
+       if (!attr->created) {
+               device_create_file(attr->core->generic_dev, &attr->dev_attr);
+               attr->created = true;
+       }
+
+       init_vpu_attr_fps_sts(attr);
+
+       return 0;
+}
+
+static int release_instance(struct vpu_ctx *ctx)
+{
+       struct vpu_dev *dev;
+
+       if (!ctx || !ctx->dev)
+               return -EINVAL;
+
+       if (!test_bit(VPU_ENC_STATUS_CLOSED, &ctx->status))
+               return 0;
+       if (!test_bit(VPU_ENC_STATUS_FORCE_RELEASE, &ctx->status)) {
+               if (test_bit(VPU_ENC_STATUS_START_SEND, &ctx->status) &&
+                       !test_bit(VPU_ENC_STATUS_STOP_DONE, &ctx->status))
+                       return -EINVAL;
+       }
+
+       dev = ctx->dev;
+
+       uninit_vpu_ctx(ctx);
+       vpu_enc_free_ctrls(ctx);
+       vpu_enc_release_queue_data(ctx);
+       vpu_enc_free_mem(ctx, get_rpc_mem_pool(ctx));
+
+       vpu_ctx_power_off(ctx);
+       free_instance(ctx);
+
+       return 0;
+}
+
+static int try_to_release_idle_instance(struct vpu_dev *dev)
+{
+       int i;
+       int j;
+
+       if (!dev)
+               return -EINVAL;
+
+       for (i = 0; i < dev->core_num; i++) {
+               if (dev->core_dev[i].hang)
+                       set_core_force_release(&dev->core_dev[i]);
+               for (j = 0; j < dev->core_dev[i].supported_instance_count; j++)
+                       release_instance(dev->core_dev[i].ctx[j]);
+       }
+
+       return 0;
+}
+
+struct vpu_attr *get_vpu_ctx_attr(struct vpu_ctx *ctx)
+{
+       WARN_ON(!ctx || !ctx->core_dev);
+
+       if (ctx->str_index >= ctx->core_dev->supported_instance_count)
+               return NULL;
+
+       return &ctx->core_dev->attr[ctx->str_index];
+}
+
+struct vpu_ctx *get_vpu_attr_ctx(struct vpu_attr *attr)
+{
+       WARN_ON(!attr || !attr->core);
+
+       if (attr->index >= attr->core->supported_instance_count)
+               return NULL;
+
+       return attr->core->ctx[attr->index];
+}
+
+static int vpu_enc_v4l2_open(struct file *filp)
+{
+       struct video_device *vdev = video_devdata(filp);
+       struct vpu_dev *dev = video_get_drvdata(vdev);
+       struct vpu_ctx *ctx = NULL;
+       int ret;
+
+       vpu_log_func();
+
+       mutex_lock(&dev->dev_mutex);
+       try_to_release_idle_instance(dev);
+
+       pm_runtime_get_sync(dev->generic_dev);
+       ctx = create_and_request_instance(dev);
+       pm_runtime_put_sync(dev->generic_dev);
+       if (!ctx) {
+               mutex_unlock(&dev->dev_mutex);
+               vpu_err("failed to create encoder ctx\n");
+               return -ENOMEM;
+       }
+
+       init_vpu_attr(get_vpu_ctx_attr(ctx));
+       ret = init_vpu_ctx(ctx);
+       if (ret) {
+               mutex_unlock(&dev->dev_mutex);
+               vpu_err("init vpu ctx fail\n");
+               goto error;
+       }
+
+       initialize_enc_param(ctx);
+       vpu_enc_setup_ctrls(ctx);
+
+       init_vpu_ctx_fh(ctx, dev);
+       init_ctx_seq_info(ctx);
+       filp->private_data = &ctx->fh;
+       mutex_unlock(&dev->dev_mutex);
+
+       return 0;
+error:
+       mutex_lock(&dev->dev_mutex);
+       set_bit(VPU_ENC_STATUS_FORCE_RELEASE, &ctx->status);
+       VPU_SAFE_RELEASE(ctx, release_instance);
+       mutex_unlock(&dev->dev_mutex);
+       return ret;
+}
+
+static int vpu_enc_v4l2_release(struct file *filp)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(filp->private_data);
+       struct vpu_dev *dev = ctx->dev;
+
+       vpu_log_func();
+
+       request_eos(ctx);
+       wait_for_stop_done(ctx);
+
+       mutex_lock(&dev->dev_mutex);
+
+       uninit_vpu_ctx_fh(ctx);
+       filp->private_data = NULL;
+
+       VPU_SAFE_RELEASE(ctx, release_instance);
+       mutex_unlock(&dev->dev_mutex);
+
+       return 0;
+}
+
+static unsigned int vpu_enc_v4l2_poll(struct file *filp, poll_table *wait)
+{
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(filp->private_data);
+       struct vb2_queue *src_q, *dst_q;
+       unsigned int rc = 0;
+
+       vpu_dbg(LVL_FUNC, "%s(), event: 0x%lx\n", __func__,
+                       (unsigned long)poll_requested_events(wait));
+
+       poll_wait(filp, &ctx->fh.wait, wait);
+
+       if (v4l2_event_pending(&ctx->fh)) {
+               vpu_dbg(LVL_DEBUG, "%s() v4l2_event_pending\n", __func__);
+               rc |= POLLPRI;
+       }
+
+       src_q = &ctx->q_data[V4L2_SRC].vb2_q;
+       dst_q = &ctx->q_data[V4L2_DST].vb2_q;
+
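+       /*
+        * If neither queue is streaming or has buffers queued, nothing can
+        * ever become ready, so report an error to the poller.
+        */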
+       if ((!src_q->streaming || list_empty(&src_q->queued_list))
+               && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
+               rc |= POLLERR;
+               return rc;
+       }
+
+       poll_wait(filp, &src_q->done_wq, wait);
+       if (!list_empty(&src_q->done_list))
+               rc |= POLLOUT | POLLWRNORM;
+       poll_wait(filp, &dst_q->done_wq, wait);
+       if (!list_empty(&dst_q->done_list))
+               rc |= POLLIN | POLLRDNORM;
+
+       return rc;
+}
+
+static int vpu_enc_v4l2_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       long ret = -EPERM;
+       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
+       struct queue_data *q_data;
+       enum QUEUE_TYPE type;
+
+       struct vpu_ctx *ctx = v4l2_fh_to_ctx(filp->private_data);
+
+       vpu_log_func();
+
+       if (ctx) {
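+               /*
+                * The mmap offset encodes the queue type in the bits above
+                * MMAP_BUF_TYPE_SHIFT; extract it, then mask those bits off
+                * before passing the remaining page offset to the queue
+                * mmap helper.
+                */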
+               type = offset >> MMAP_BUF_TYPE_SHIFT;
+               q_data = &ctx->q_data[type];
+
+               offset &= ~MMAP_BUF_TYPE_MASK;
+               offset = offset >> PAGE_SHIFT;
+               vma->vm_pgoff = offset;
+               ret = vpu_enc_queue_mmap(q_data, vma);
+       }
+
+       return ret;
+}
+
+static const struct v4l2_file_operations vpu_enc_v4l2_fops = {
+       .owner = THIS_MODULE,
+       .open  = vpu_enc_v4l2_open,
+       .unlocked_ioctl = video_ioctl2,
+       .release = vpu_enc_v4l2_release,
+       .poll = vpu_enc_v4l2_poll,
+       .mmap = vpu_enc_v4l2_mmap,
+};
+
+static struct video_device vpu_enc_v4l2_videodevice = {
+       .name   = "vpu encoder",
+       .fops   = &vpu_enc_v4l2_fops,
+       .ioctl_ops = &vpu_enc_v4l2_ioctl_ops,
+       .vfl_dir = VFL_DIR_M2M,
+};
+
+static void vpu_enc_setup(struct vpu_dev *This)
+{
+       const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;
+       uint32_t read_data = 0;
+
+       vpu_log_func();
+
+       write_vpu_reg(This, 0x1, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET);
+       write_vpu_reg(This, 0xffffffff, 0x70190);
+       write_vpu_reg(This, 0xffffffff, offset + SCB_BLK_CTRL_XMEM_RESET_SET);
+       write_vpu_reg(This, 0xE, offset + SCB_BLK_CTRL_SCB_CLK_ENABLE_SET);
+       write_vpu_reg(This, 0x7, offset + SCB_BLK_CTRL_CACHE_RESET_SET);
+       write_vpu_reg(This, 0x102, XMEM_CONTROL);
+
+       read_data = read_vpu_reg(This, 0x70108);
+       vpu_dbg(LVL_IRQ, "%s read_data=%x\n", __func__, read_data);
+}
+
+static void vpu_enc_reset(struct vpu_dev *This)
+{
+       const off_t offset = SCB_XREG_SLV_BASE + SCB_SCB_BLK_CTRL;
+
+       vpu_log_func();
+       write_vpu_reg(This, 0x7, offset + SCB_BLK_CTRL_CACHE_RESET_CLR);
+}
+
+static int vpu_enc_enable_hw(struct vpu_dev *This)
+{
+       vpu_log_func();
+       vpu_enc_setup(This);
+
+       This->hw_enable = true;
+
+       return 0;
+}
+
+static void vpu_enc_disable_hw(struct vpu_dev *This)
+{
+       This->hw_enable = false;
+       vpu_enc_reset(This);
+       if (This->regs_base) {
+               iounmap(This->regs_base);
+               This->regs_base = NULL;
+       }
+}
+
+static int parse_core_info(struct core_device *core, struct device_node *np)
+{
+       int ret;
+       u32 val;
+
+       WARN_ON(!core || !np);
+
+       ret = of_property_read_u32_index(np, "reg", 0, &val);
+       if (ret) {
+               vpu_err("find reg for core[%d] fail\n", core->id);
+               return ret;
+       }
+       core->reg_base = val;
+
+       ret = of_property_read_u32_index(np, "reg", 1, &val);
+       if (ret) {
+               vpu_err("find reg for core[%d] fail\n", core->id);
+               return ret;
+       }
+       core->reg_size = val;
+
+       ret = of_property_read_u32_index(np, "reg-csr", 0, &val);
+       if (ret) {
+               vpu_err("find reg-csr for core[%d] fail\n", core->id);
+               return ret;
+       }
+       core->reg_csr_base = val;
+
+       ret = of_property_read_u32_index(np, "reg-csr", 1, &val);
+       if (ret) {
+               vpu_err("find reg-csr for core[%d] fail\n", core->id);
+               return ret;
+       }
+       core->reg_csr_size = val;
+
+       ret = of_irq_get(np, 0);
+       if (ret < 0) {
+               vpu_err("get irq for core[%d] fail\n", core->id);
+               return -EINVAL;
+       }
+       core->irq = ret;
+
+       ret = of_property_read_u32(np, "fsl,vpu_ap_mu_id", &val);
+       if (!ret)
+               core->vpu_mu_id = val;
+
+       ret = of_property_read_u32(np, "fw-buf-size", &val);
+       if (ret) {
+               vpu_err("find fw-buf-size for core[%d] fail\n", core->id);
+               core->fw_buf_size = M0_BOOT_SIZE_DEFAULT;
+       } else {
+               core->fw_buf_size = val;
+       }
+       core->fw_buf_size = max_t(u32, core->fw_buf_size, M0_BOOT_SIZE_MIN);
+
+       ret = of_property_read_u32(np, "rpc-buf-size", &val);
+       if (ret) {
+               vpu_err("find rpc-buf-size for core[%d] fail\n", core->id);
+               core->rpc_buf_size = RPC_SIZE_DEFAULT;
+       } else {
+               core->rpc_buf_size = val;
+       }
+       core->rpc_buf_size = max_t(u32, core->rpc_buf_size, RPC_SIZE_MIN);
+
+       ret = of_property_read_u32(np, "print-buf-size", &val);
+       if (ret) {
+               vpu_err("find print-buf-size for core[%d] fail\n", core->id);
+               core->print_buf_size = PRINT_SIZE_DEFAULT;
+       } else {
+               core->print_buf_size = val;
+       }
+       core->print_buf_size = max_t(u32, core->print_buf_size, PRINT_SIZE_MIN);
+
+       return 0;
+}
+
+static int parse_dt_cores(struct vpu_dev *dev, struct device_node *np)
+{
+       char core_name[64];
+       struct device_node *node = NULL;
+       struct core_device *core = NULL;
+       int i;
+       int ret;
+
+       dev->core_num = 0;
+       for (i = 0; i < VPU_ENC_MAX_CORE_NUM; i++) {
+               scnprintf(core_name, sizeof(core_name), "core%d", i);
+               node = of_find_node_by_name(np, core_name);
+               if (!node) {
+                       vpu_dbg(LVL_INFO, "can't find %s\n", core_name);
+                       break;
+               }
+
+               core = &dev->core_dev[i];
+               core->id = i;
+               ret = parse_core_info(core, node);
+               of_node_put(node);
+               node = NULL;
+               if (ret) {
+                       vpu_err("parse core[%d] fail\n", i);
+                       break;
+               }
+       }
+       if (i == 0)
+               return -EINVAL;
+
+       dev->core_num = i;
+
+       return 0;
+}
+
+static int parse_dt_info(struct vpu_dev *dev, struct device_node *np)
+{
+       int ret;
+       struct device_node *reserved_node = NULL;
+       struct resource reserved_fw;
+       struct resource reserved_rpc;
+       struct resource reserved_mem;
+       u32 fw_total_size = 0;
+       u32 rpc_total_size = 0;
+       u32 val;
+       u32 i;
+
+       if (!dev || !np)
+               return -EINVAL;
+
+       ret = of_property_read_u32_index(np, "reg-rpc-system", 0, &val);
+       if (ret) {
+               vpu_err("get reg-rpc-system fail\n");
+               return -EINVAL;
+       }
+       dev->reg_rpc_system = val;
+
+       reserved_node = of_parse_phandle(np, "boot-region", 0);
+       if (!reserved_node) {
+               vpu_err("error: boot-region of_parse_phandle error\n");
+               return -ENODEV;
+       }
+       if (of_address_to_resource(reserved_node, 0, &reserved_fw)) {
+               vpu_err("error: boot-region of_address_to_resource error\n");
+               return -EINVAL;
+       }
+
+       reserved_node = of_parse_phandle(np, "rpc-region", 0);
+       if (!reserved_node) {
+               vpu_err("error: rpc-region of_parse_phandle error\n");
+               return -ENODEV;
+       }
+       if (of_address_to_resource(reserved_node, 0, &reserved_rpc)) {
+               vpu_err("error: rpc-region of_address_to_resource error\n");
+               return -EINVAL;
+       }
+
+       reserved_node = of_parse_phandle(np, "reserved-region", 0);
+       if (!reserved_node) {
+               vpu_err("error: rpc-region of_parse_phandle error\n");
+               return -ENODEV;
+       }
+       if (of_address_to_resource(reserved_node, 0, &reserved_mem)) {
+               vpu_err("error: rpc-region of_address_to_resource error\n");
+               return -EINVAL;
+       }
+       dev->reserved_mem.phy_addr = reserved_mem.start;
+       dev->reserved_mem.size = resource_size(&reserved_mem);
+
+       ret = parse_dt_cores(dev, np);
+       if (ret) {
+               vpu_err("parse cores from dt fail\n");
+               return ret;
+       }
+
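+       /*
+        * Carve the boot-region and rpc-region into per-core slices:
+        * each core gets fw_buf_size of firmware space and
+        * rpc_buf_size + print_buf_size of shared memory.
+        */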
+       fw_total_size = 0;
+       rpc_total_size = 0;
+       for (i = 0; i < dev->core_num; i++) {
+               struct core_device *core = &dev->core_dev[i];
+
+               core->m0_p_fw_space_phy = reserved_fw.start + fw_total_size;
+               core->m0_rpc_phy = reserved_rpc.start + rpc_total_size;
+               fw_total_size += core->fw_buf_size;
+               rpc_total_size += core->rpc_buf_size;
+               rpc_total_size += core->print_buf_size;
+       }
+
+       if (fw_total_size > resource_size(&reserved_fw)) {
+               vpu_err("boot-region's size(0x%llx) is less than wanted:0x%x\n",
+                               resource_size(&reserved_fw), fw_total_size);
+               return -EINVAL;
+       }
+       if (rpc_total_size > resource_size(&reserved_rpc)) {
+               vpu_err("rpc-region's size(0x%llx) is less than wanted:0x%x\n",
+                               resource_size(&reserved_rpc), rpc_total_size);
+               return -EINVAL;
+       }
+
+       dev->supported_size.min_width = VPU_ENC_WIDTH_MIN;
+       dev->supported_size.max_width = VPU_ENC_WIDTH_MAX;
+       dev->supported_size.step_width = VPU_ENC_WIDTH_STEP;
+       dev->supported_size.min_height = VPU_ENC_HEIGHT_MIN;
+       dev->supported_size.max_height = VPU_ENC_HEIGHT_MAX;
+       dev->supported_size.step_height = VPU_ENC_HEIGHT_STEP;
+
+       dev->supported_fps.min = VPU_ENC_FRAMERATE_MIN;
+       dev->supported_fps.max = VPU_ENC_FRAMERATE_MAX;
+       dev->supported_fps.step = VPU_ENC_FRAMERATE_STEP;
+
+       ret = of_property_read_u32_index(np, "resolution-max", 0, &val);
+       if (!ret)
+               dev->supported_size.max_width = val;
+
+       ret = of_property_read_u32_index(np, "resolution-max", 1, &val);
+       if (!ret)
+               dev->supported_size.max_height = val;
+
+       ret = of_property_read_u32_index(np, "fps-max", 0, &val);
+       if (!ret)
+               dev->supported_fps.max = val;
+
+       return 0;
+}
+
+static int create_vpu_video_device(struct vpu_dev *dev)
+{
+       int ret;
+
+       dev->pvpu_encoder_dev = video_device_alloc();
+       if (!dev->pvpu_encoder_dev) {
+               vpu_err("alloc vpu encoder video device fail\n");
+               return -ENOMEM;
+       }
+
+       strlcpy(dev->pvpu_encoder_dev->name,
+               vpu_enc_v4l2_videodevice.name,
+               sizeof(dev->pvpu_encoder_dev->name));
+       dev->pvpu_encoder_dev->fops = vpu_enc_v4l2_videodevice.fops;
+       dev->pvpu_encoder_dev->ioctl_ops = vpu_enc_v4l2_videodevice.ioctl_ops;
+       dev->pvpu_encoder_dev->release = video_device_release;
+       dev->pvpu_encoder_dev->vfl_dir = vpu_enc_v4l2_videodevice.vfl_dir;
+       dev->pvpu_encoder_dev->v4l2_dev = &dev->v4l2_dev;
+       video_set_drvdata(dev->pvpu_encoder_dev, dev);
+
+       ret = video_register_device(dev->pvpu_encoder_dev,
+                                       VFL_TYPE_GRABBER,
+                                       ENCODER_NODE_NUMBER);
+       if (ret) {
+               vpu_err("unable to register video encoder device\n");
+               video_device_release(dev->pvpu_encoder_dev);
+               dev->pvpu_encoder_dev = NULL;
+               return ret;
+       }
+
+       return 0;
+}
+
+static int init_vpu_attrs(struct core_device *core)
+{
+       int i;
+
+       WARN_ON(!core);
+
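+       /*
+        * Prepare one read-only sysfs attribute per possible instance;
+        * the file itself is only created when the instance is opened
+        * (see init_vpu_attr).
+        */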
+       for (i = 0; i < ARRAY_SIZE(core->attr); i++) {
+               struct vpu_attr *attr = &core->attr[i];
+
+               attr->core = core;
+               attr->index = i;
+               scnprintf(attr->name, sizeof(attr->name) - 1, "instance.%d.%d",
+                               core->id, attr->index);
+               sysfs_attr_init(&attr->dev_attr.attr);
+               attr->dev_attr.attr.name = attr->name;
+               attr->dev_attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+               attr->dev_attr.show = show_instance_info;
+
+               atomic64_set(&attr->total_dma_size, 0);
+
+               attr->created = false;
+       }
+
+       return 0;
+}
+
+static int release_vpu_attrs(struct core_device *core)
+{
+       int i;
+
+       WARN_ON(!core);
+
+       for (i = 0; i < ARRAY_SIZE(core->attr); i++) {
+               struct vpu_attr *attr = &core->attr[i];
+
+               if (!attr->created)
+                       continue;
+               device_remove_file(attr->core->generic_dev, &attr->dev_attr);
+       }
+
+       return 0;
+}
+
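+/*
+ * A context counts as frozen when its command/event counters have not
+ * advanced since the last watchdog pass while an encode is still
+ * outstanding, or when a configure/start/stop command has not yet been
+ * answered by its completion event.
+ */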
+static int is_ctx_frozen(struct vpu_ctx *ctx)
+{
+       int is_frozen = 1;
+       int i;
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+       for (i = 0; i < GTB_ENC_CMD_RESERVED; i++) {
+               if (attr->statistic.cmd[i] != ctx->sts.cmd[i])
+                       is_frozen = 0;
+               ctx->sts.cmd[i] = attr->statistic.cmd[i];
+       }
+
+       for (i = 0; i < VID_API_ENC_EVENT_RESERVED; i++) {
+               if (attr->statistic.event[i] != ctx->sts.event[i])
+                       is_frozen = 0;
+               ctx->sts.event[i] = attr->statistic.event[i];
+       }
+
+       if (ctx->sts.cmd[GTB_ENC_CMD_FRAME_ENCODE] ==
+                       ctx->sts.event[VID_API_ENC_EVENT_FRAME_DONE])
+               is_frozen = 0;
+
+       if (ctx->sts.cmd[GTB_ENC_CMD_CONFIGURE_CODEC] >
+                       ctx->sts.event[VID_API_ENC_EVENT_MEM_REQUEST])
+               is_frozen = 1;
+       if (ctx->sts.cmd[GTB_ENC_CMD_STREAM_START] >
+                       ctx->sts.event[VID_API_ENC_EVENT_START_DONE])
+               is_frozen = 1;
+       if (ctx->sts.cmd[GTB_ENC_CMD_STREAM_STOP] >
+                       ctx->sts.event[VID_API_ENC_EVENT_STOP_DONE])
+               is_frozen = 1;
+
+       return is_frozen;
+}
+
+static bool check_vpu_ctx_is_hang(struct vpu_ctx *ctx)
+{
+       if (is_ctx_frozen(ctx))
+               ctx->frozen_count++;
+       else
+               ctx->frozen_count = 0;
+
+       if (ctx->frozen_count > VPU_ENC_HANG_THD) {
+               set_bit(VPU_ENC_STATUS_HANG, &ctx->status);
+               ctx->frozen_count = VPU_ENC_HANG_THD;
+       } else if (ctx->frozen_count == 0) {
+               clear_bit(VPU_ENC_STATUS_HANG, &ctx->status);
+       }
+
+       if (test_bit(VPU_ENC_STATUS_HANG, &ctx->status))
+               return true;
+
+       return false;
+}
+
+static void check_vpu_core_is_hang(struct core_device *core)
+{
+       int i;
+       unsigned int instance_count = 0;
+       unsigned int hang_count = 0;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (!core->ctx[i])
+                       continue;
+
+               if (check_vpu_ctx_is_hang(core->ctx[i]))
+                       hang_count++;
+               instance_count++;
+       }
+
+       if (!instance_count)
+               return;
+       if (hang_count == instance_count)
+               set_core_hang(core);
+       else
+               clear_core_hang(core);
+}
+
+static void handle_vpu_core_watchdog(struct core_device *core)
+{
+       if (!core->fw_is_ready)
+               return;
+       if (core->suspend)
+               return;
+       if (core->snapshot)
+               return;
+
+       check_vpu_core_is_hang(core);
+}
+
+static unsigned long get_timestamp_ns(struct timespec *ts)
+{
+       if (!ts)
+               return 0;
+
+       return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
+}
+
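+/*
+ * Refresh one fps measurement window: after 'thd' watchdog ticks, derive
+ * fps from the encoded-frame and raw monotonic time deltas (scaled by
+ * VPU_FPS_COEF). Windows with a non-zero threshold then re-baseline;
+ * a zero threshold keeps measuring since the statistics were enabled.
+ */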
+static void calc_rt_fps(struct vpu_fps_sts *fps,
+                       unsigned long number, struct timespec *ts)
+{
+       unsigned long delta_num;
+       unsigned long delta_ts;
+
+       if (!fps || !ts)
+               return;
+
+       fps->times++;
+       if (fps->times < fps->thd)
+               return;
+
+       if (number >= fps->frame_number) {
+               delta_num = number - fps->frame_number;
+               delta_ts = get_timestamp_ns(ts) - get_timestamp_ns(&fps->ts);
+               if (!delta_ts)
+                       return;
+               fps->fps = delta_num * NSEC_PER_SEC * VPU_FPS_COEF / delta_ts;
+       }
+       fps->times = 0;
+       if (fps->thd) {
+               fps->frame_number = number;
+               memcpy(&fps->ts, ts, sizeof(fps->ts));
+       }
+}
+
+static void statistic_fps_info(struct vpu_statistic *sts)
+{
+       unsigned long encoded_count = sts->encoded_count;
+       struct timespec ts;
+       int i;
+
+       if (!sts->fps_sts_enable)
+               return;
+       getrawmonotonic(&ts);
+       for (i = 0; i < VPU_FPS_STS_CNT; i++)
+               calc_rt_fps(&sts->fps[i], encoded_count, &ts);
+}
+
+static void print_firmware_debug(char *ptr, u32 size)
+{
+       u32 total = 0;
+       u32 len;
+
+       while (total < size) {
+               len = min_t(u32, size - total, 256);
+
+               pr_info("%.*s", (int)len, ptr + total);
+               total += len;
+       }
+}
+
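+/*
+ * Drain the firmware print ring buffer between the read and write
+ * pointers (handling wrap-around at 'bytes') and forward the text to
+ * the kernel log.
+ */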
+static void handle_core_firmware_debug(struct core_device *core)
+{
+       char *ptr;
+       u32 rptr;
+       u32 wptr;
+
+       if (!core || !core->print_buf)
+               return;
+
+       if (!test_bit(core->id, &debug_firmware_bitmap))
+               return;
+
+       rptr = core->print_buf->read;
+       wptr = core->print_buf->write;
+       if (rptr == wptr)
+               return;
+
+       ptr = core->print_buf->buffer;
+       pr_info("----mem_printf for VPU Encoder core %d----\n", core->id);
+       if (rptr > wptr) {
+               print_firmware_debug(ptr + rptr, core->print_buf->bytes - rptr);
+               rptr = 0;
+       }
+       if (rptr < wptr) {
+               print_firmware_debug(ptr + rptr, wptr - rptr);
+               rptr = wptr;
+       }
+       if (rptr >= core->print_buf->bytes)
+               rptr = 0;
+       core->print_buf->read = rptr;
+}
+
+static void handle_core_minors(struct core_device *core)
+{
+       int i;
+
+       for (i = 0; i < core->supported_instance_count; i++)
+               statistic_fps_info(&core->attr[i].statistic);
+
+       handle_core_firmware_debug(core);
+}
+
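+/*
+ * Periodic watchdog work: check every core for hung instances under
+ * dev_mutex, then update fps statistics and drain firmware debug
+ * output, and finally re-arm itself.
+ */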
+static void vpu_enc_watchdog_handler(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct vpu_dev *vdev;
+       int i;
+
+       if (!work)
+               return;
+
+       dwork = to_delayed_work(work);
+       vdev = container_of(dwork, struct vpu_dev, watchdog);
+
+       mutex_lock(&vdev->dev_mutex);
+       for (i = 0; i < vdev->core_num; i++)
+               handle_vpu_core_watchdog(&vdev->core_dev[i]);
+       mutex_unlock(&vdev->dev_mutex);
+
+       for (i = 0; i < vdev->core_num; i++)
+               handle_core_minors(&vdev->core_dev[i]);
+
+       vdev->heartbeat++;
+       schedule_delayed_work(&vdev->watchdog,
+                       msecs_to_jiffies(VPU_WATCHDOG_INTERVAL_MS));
+}
+
+static int init_vpu_core_dev(struct core_device *core_dev)
+{
+       int ret;
+
+       if (!core_dev)
+               return -EINVAL;
+
+       mutex_init(&core_dev->cmd_mutex);
+       init_completion(&core_dev->start_cmp);
+       init_completion(&core_dev->snap_done_cmp);
+
+       core_dev->workqueue = alloc_workqueue("vpu",
+                                       WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+       if (!core_dev->workqueue) {
+               vpu_err("%s unable to alloc workqueue\n", __func__);
+               ret = -ENOMEM;
+               return ret;
+       }
+
+       INIT_WORK(&core_dev->msg_work, vpu_enc_msg_run_work);
+
+       ret = vpu_enc_mu_init(core_dev);
+       if (ret) {
+               vpu_err("%s vpu mu init failed\n", __func__);
+               goto error;
+       }
+       //firmware space for M0
+       core_dev->m0_p_fw_space_vir =
+               ioremap_wc(core_dev->m0_p_fw_space_phy, core_dev->fw_buf_size);
+       if (!core_dev->m0_p_fw_space_vir) {
+               vpu_err("failed to remap space for M0 firmware\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       cleanup_firmware_memory(core_dev);
+
+       core_dev->m0_rpc_virt =
+               ioremap_wc(core_dev->m0_rpc_phy,
+                       core_dev->rpc_buf_size + core_dev->print_buf_size);
+       if (!core_dev->m0_rpc_virt) {
+               vpu_err("failed to remap space for shared memory\n");
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       memset_io(core_dev->m0_rpc_virt, 0, core_dev->rpc_buf_size);
+
+       reset_vpu_core_dev(core_dev);
+
+       init_vpu_attrs(core_dev);
+
+       scnprintf(core_dev->name, sizeof(core_dev->name) - 1,
+                       "core.%d", core_dev->id);
+       sysfs_attr_init(&core_dev->core_attr.attr);
+       core_dev->core_attr.attr.name = core_dev->name;
+       core_dev->core_attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+       core_dev->core_attr.show = show_core_info;
+       device_create_file(core_dev->generic_dev, &core_dev->core_attr);
+
+       return 0;
+error:
+       if (core_dev->workqueue) {
+               destroy_workqueue(core_dev->workqueue);
+               core_dev->workqueue = NULL;
+       }
+       return ret;
+}
+
+static int uninit_vpu_core_dev(struct core_device *core_dev)
+{
+       if (!core_dev)
+               return -EINVAL;
+
+       if (core_dev->core_attr.attr.name)
+               device_remove_file(core_dev->generic_dev, &core_dev->core_attr);
+       release_vpu_attrs(core_dev);
+       if (core_dev->workqueue) {
+               cancel_work_sync(&core_dev->msg_work);
+               destroy_workqueue(core_dev->workqueue);
+               core_dev->workqueue = NULL;
+       }
+
+       if (core_dev->m0_p_fw_space_vir) {
+               iounmap(core_dev->m0_p_fw_space_vir);
+               core_dev->m0_p_fw_space_vir = NULL;
+       }
+       core_dev->m0_p_fw_space_phy = 0;
+
+       if (core_dev->m0_rpc_virt) {
+               iounmap(core_dev->m0_rpc_virt);
+               core_dev->m0_rpc_virt = NULL;
+       }
+       core_dev->m0_rpc_phy = 0;
+
+       if (core_dev->mu_base_virtaddr)
+               core_dev->mu_base_virtaddr = NULL;
+
+       if (core_dev->generic_dev) {
+               put_device(core_dev->generic_dev);
+               core_dev->generic_dev = NULL;
+       }
+
+       return 0;
+}
+
+static void init_vpu_enc_watchdog(struct vpu_dev *vdev)
+{
+       if (!vdev)
+               return;
+
+       INIT_DELAYED_WORK(&vdev->watchdog, vpu_enc_watchdog_handler);
+       schedule_delayed_work(&vdev->watchdog,
+                       msecs_to_jiffies(VPU_WATCHDOG_INTERVAL_MS));
+}
+
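+/*
+ * Read the VPU fuse word from the SCU over a MU/IPC channel and refuse
+ * to probe when the encoder has been fused off.
+ */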
+static int check_vpu_encoder_is_available(void)
+{
+       sc_ipc_t mu_ipc;
+       sc_ipc_id_t mu_id;
+       uint32_t fuse = 0xffff;
+       int ret;
+
+       ret = sc_ipc_getMuID(&mu_id);
+       if (ret) {
+               vpu_err("sc_ipc_getMuID() can't obtain MU ID from SCI! %d\n",
+                               ret);
+               return -EINVAL;
+       }
+
+       ret = sc_ipc_open(&mu_ipc, mu_id);
+       if (ret) {
+               vpu_err("sc_ipc_open() can't open MU channel to SCU! %d\n",
+                               ret);
+               return -EINVAL;
+       }
+
+       ret = sc_misc_otp_fuse_read(mu_ipc, VPU_DISABLE_BITS, &fuse);
+       sc_ipc_close(mu_ipc);
+       if (ret) {
+               vpu_err("sc_misc_otp_fuse_read fail! %d\n", ret);
+               return -EINVAL;
+       }
+
+       vpu_dbg(LVL_INFO, "mu_id = %d, fuse[7] = 0x%x\n", mu_id, fuse);
+       if (fuse & VPU_ENCODER_MASK) {
+               vpu_err("----Error, VPU Encoder is disabled\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id vpu_enc_of_match[];
+static int vpu_enc_probe(struct platform_device *pdev)
+{
+       struct vpu_dev *dev;
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *dev_id = NULL;
+       struct resource *res = NULL;
+       u_int32 i;
+       int ret;
+
+       if (!np) {
+               vpu_err("error: %s of_node is NULL\n", __func__);
+               return -EINVAL;
+       }
+
+       dev_id = of_match_device(vpu_enc_of_match, &pdev->dev);
+       if (!dev_id) {
+               vpu_err("unmatch vpu encoder device\n");
+               return -EINVAL;
+       }
+       vpu_dbg(LVL_INFO, "probe %s\n", dev_id->compatible);
+
+       if (check_vpu_encoder_is_available())
+               return -EINVAL;
+
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+       dev->plat_type = *(enum PLAT_TYPE *)dev_id->data;
+       dev->plat_dev = pdev;
+       dev->generic_dev = get_device(&pdev->dev);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               goto error_put_dev;
+
+       dev->reg_vpu_base = res->start;
+       dev->reg_vpu_size = resource_size(res);
+
+       ret = parse_dt_info(dev, np);
+       if (ret) {
+               vpu_err("parse device tree fail\n");
+               goto error_put_dev;
+       }
+
+       dev->regs_base = ioremap(dev->reg_vpu_base, dev->reg_vpu_size);
+       if (!dev->regs_base) {
+               vpu_err("%s could not map regs_base\n", __func__);
+               ret = -ENOMEM;
+               goto error_put_dev;
+       }
+
+       ret = vpu_enc_init_reserved_memory(&dev->reserved_mem);
+       if (ret) {
+               vpu_err("%s couldn't init reserved memory\n", __func__);
+               goto error_iounmap;
+       }
+
+       ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
+       if (ret) {
+               vpu_err("%s unable to register v4l2 dev\n", __func__);
+               goto error_reserved_mem;
+       }
+
+       platform_set_drvdata(pdev, dev);
+
+       ret = create_vpu_video_device(dev);
+       if (ret) {
+               vpu_err("create vpu video device fail\n");
+               goto error_unreg_v4l2;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+       pm_runtime_get_sync(&pdev->dev);
+
+       vpu_enc_enable_hw(dev);
+
+       mutex_init(&dev->dev_mutex);
+       for (i = 0; i < dev->core_num; i++) {
+               dev->core_dev[i].id = i;
+               dev->core_dev[i].generic_dev = get_device(dev->generic_dev);
+               dev->core_dev[i].vdev = dev;
+               ret = init_vpu_core_dev(&dev->core_dev[i]);
+               if (ret)
+                       break;
+       }
+       if (i == 0)
+               goto error_init_core;
+       dev->core_num = i;
+
+       pm_runtime_put_sync(&pdev->dev);
+
+       device_create_file(&pdev->dev, &dev_attr_meminfo);
+       device_create_file(&pdev->dev, &dev_attr_buffer);
+       device_create_file(&pdev->dev, &dev_attr_fpsinfo);
+       device_create_file(&pdev->dev, &dev_attr_vpuinfo);
+       init_vpu_enc_watchdog(dev);
+       vpu_dbg(LVL_INFO, "VPU Encoder registered\n");
+
+       return 0;
+
+error_init_core:
+       for (i = 0; i < dev->core_num; i++)
+               uninit_vpu_core_dev(&dev->core_dev[i]);
+
+       vpu_enc_disable_hw(dev);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       if (dev->pvpu_encoder_dev) {
+               video_unregister_device(dev->pvpu_encoder_dev);
+               dev->pvpu_encoder_dev = NULL;
+       }
+error_unreg_v4l2:
+       v4l2_device_unregister(&dev->v4l2_dev);
+error_reserved_mem:
+       vpu_enc_release_reserved_memory(&dev->reserved_mem);
+error_iounmap:
+       if (dev->regs_base) {
+               iounmap(dev->regs_base);
+               dev->regs_base = NULL;
+       }
+error_put_dev:
+       if (dev->generic_dev) {
+               put_device(dev->generic_dev);
+               dev->generic_dev = NULL;
+       }
+       return -EINVAL;
+}
+
+static int vpu_enc_remove(struct platform_device *pdev)
+{
+       struct vpu_dev *dev = platform_get_drvdata(pdev);
+       u_int32 i;
+
+       cancel_delayed_work_sync(&dev->watchdog);
+       device_remove_file(&pdev->dev, &dev_attr_vpuinfo);
+       device_remove_file(&pdev->dev, &dev_attr_fpsinfo);
+       device_remove_file(&pdev->dev, &dev_attr_buffer);
+       device_remove_file(&pdev->dev, &dev_attr_meminfo);
+
+       pm_runtime_get_sync(&pdev->dev);
+       for (i = 0; i < dev->core_num; i++)
+               uninit_vpu_core_dev(&dev->core_dev[i]);
+
+       vpu_enc_disable_hw(dev);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
+
+       if (video_get_drvdata(dev->pvpu_encoder_dev))
+               video_unregister_device(dev->pvpu_encoder_dev);
+
+       v4l2_device_unregister(&dev->v4l2_dev);
+       vpu_enc_release_reserved_memory(&dev->reserved_mem);
+       if (dev->regs_base) {
+               iounmap(dev->regs_base);
+               dev->regs_base = NULL;
+       }
+       if (dev->generic_dev) {
+               put_device(dev->generic_dev);
+               dev->generic_dev = NULL;
+       }
+
+       vpu_dbg(LVL_INFO, "VPU Encoder removed\n");
+
+       return 0;
+}
+
+static int vpu_enc_runtime_suspend(struct device *dev)
+{
+       return 0;
+}
+
+static int vpu_enc_runtime_resume(struct device *dev)
+{
+       return 0;
+}
+
+static bool is_core_activated(struct core_device *core)
+{
+       WARN_ON(!core);
+
+       return readl_relaxed(core->mu_base_virtaddr + MU_B0_REG_CONTROL) != 0;
+}
+
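+/*
+ * Only instances that are initialized and configured, and not yet
+ * closed or stopping, need a firmware snapshot before suspend.
+ */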
+static int is_need_snapshot(struct vpu_ctx *ctx)
+{
+       if (!test_bit(VPU_ENC_STATUS_INITIALIZED, &ctx->status))
+               return 0;
+       if (!test_bit(VPU_ENC_STATUS_CONFIGURED, &ctx->status))
+               return 0;
+       if (test_bit(VPU_ENC_STATUS_CLOSED, &ctx->status))
+               return 0;
+       if (test_bit(VPU_ENC_STATUS_STOP_SEND, &ctx->status))
+               return 0;
+       if (test_bit(VPU_ENC_STATUS_STOP_DONE, &ctx->status))
+               return 0;
+
+       return 1;
+}
+
+static int vpu_enc_snapshot(struct vpu_ctx *ctx)
+{
+       int ret;
+
+       if (!ctx)
+               return -EINVAL;
+
+       vpu_dbg(LVL_INFO, "core[%d] snapshot\n", ctx->core_dev->id);
+       vpu_ctx_send_cmd(ctx, GTB_ENC_CMD_SNAPSHOT, 0, NULL);
+       ret = wait_for_completion_timeout(&ctx->core_dev->snap_done_cmp,
+                                               msecs_to_jiffies(1000));
+       if (!ret)
+               vpu_err("error:wait for snapdone event timeout!\n");
+       else
+               ctx->core_dev->snapshot = true;
+
+       return 0;
+}
+
+static int resume_from_snapshot(struct core_device *core)
+{
+       int ret = 0;
+
+       if (!core)
+               return -EINVAL;
+       if (!core->snapshot)
+               return 0;
+
+       vpu_dbg(LVL_INFO, "core[%d] resume from snapshot\n", core->id);
+
+       init_completion(&core->start_cmp);
+       set_vpu_fw_addr(core->vdev, core);
+       ret = wait_for_start_done(core, 1);
+       if (ret) {
+               set_core_force_release(core);
+               reset_vpu_core_dev(core);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int re_download_firmware(struct core_device *core)
+{
+       if (!core)
+               return -EINVAL;
+
+       vpu_dbg(LVL_INFO, "re download firmware for core[%d]\n", core->id);
+
+       reset_vpu_core_dev(core);
+       return download_vpu_firmware(core->vdev, core);
+}
+
+static int suspend_instance(struct vpu_ctx *ctx)
+{
+       int ret = 0;
+
+       if (!ctx)
+               return 0;
+
+       if (test_bit(VPU_ENC_STATUS_STOP_REQ, &ctx->status) ||
+               test_bit(VPU_ENC_STATUS_STOP_SEND, &ctx->status))
+               wait_for_stop_done(ctx);
+
+       mutex_lock(&ctx->instance_mutex);
+       if (!ctx->core_dev->snapshot && is_need_snapshot(ctx))
+               ret = vpu_enc_snapshot(ctx);
+       set_bit(VPU_ENC_STATUS_SNAPSHOT, &ctx->status);
+       mutex_unlock(&ctx->instance_mutex);
+
+       return ret;
+}
+
+static int resume_instance(struct vpu_ctx *ctx)
+{
+       if (!ctx)
+               return 0;
+
+       clear_bit(VPU_ENC_STATUS_SNAPSHOT, &ctx->status);
+
+       return 0;
+}
+
+static int suspend_core(struct core_device *core)
+{
+       int i;
+       int ret = 0;
+
+       WARN_ON(!core);
+
+       core->snapshot = false;
+
+       if (!core->fw_is_ready)
+               return 0;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (!core->ctx[i])
+                       continue;
+               ret = suspend_instance(core->ctx[i]);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < core->supported_instance_count; i++)
+               vpu_ctx_power_off(core->ctx[i]);
+
+       core->suspend = true;
+
+       return 0;
+}
+
+static int resume_core(struct core_device *core)
+{
+       int ret = 0;
+       u32 instance_count = 0;
+       int i;
+
+       WARN_ON(!core);
+
+       if (!core->suspend)
+               return 0;
+
+       for (i = 0; i < core->supported_instance_count; i++) {
+               if (!core->ctx[i])
+                       continue;
+               instance_count++;
+               vpu_ctx_power_on(core->ctx[i]);
+               resume_instance(core->ctx[i]);
+       }
+
+       /* if the core isn't activated, it has been powered off and back on */
+       if (!is_core_activated(core)) {
+               if (!core->vdev->hw_enable)
+                       vpu_enc_enable_hw(core->vdev);
+               if (core->snapshot)
+                       ret = resume_from_snapshot(core);
+               else if (instance_count)
+                       ret = re_download_firmware(core);
+               else
+                       reset_vpu_core_dev(core);
+       } else {
+               if (core->snapshot || instance_count)
+                       ret = sw_reset_firmware(core, 1);
+       }
+
+       core->snapshot = false;
+       core->suspend = false;
+
+       return ret;
+}
+
+static int vpu_enc_suspend(struct device *dev)
+{
+       struct vpu_dev *vpudev = (struct vpu_dev *)dev_get_drvdata(dev);
+       int i;
+       int ret = 0;
+
+       vpu_dbg(LVL_INFO, "suspend\n");
+
+       mutex_lock(&vpudev->dev_mutex);
+       pm_runtime_get_sync(dev);
+       for (i = 0; i < vpudev->core_num; i++) {
+               ret = suspend_core(&vpudev->core_dev[i]);
+               if (ret)
+                       break;
+       }
+       pm_runtime_put_sync(dev);
+       mutex_unlock(&vpudev->dev_mutex);
+
+       vpu_dbg(LVL_INFO, "suspend done\n");
+
+       return ret;
+}
+
+static int vpu_enc_resume(struct device *dev)
+{
+       struct vpu_dev *vpudev = (struct vpu_dev *)dev_get_drvdata(dev);
+       int i;
+       int ret = 0;
+
+       vpu_dbg(LVL_INFO, "resume\n");
+
+       mutex_lock(&vpudev->dev_mutex);
+       pm_runtime_get_sync(dev);
+       vpudev->hw_enable = false;
+       for (i = 0; i < vpudev->core_num; i++) {
+               ret = resume_core(&vpudev->core_dev[i]);
+               if (ret)
+                       break;
+       }
+       vpudev->hw_enable = true;
+       pm_runtime_put_sync(dev);
+       mutex_unlock(&vpudev->dev_mutex);
+
+       vpu_dbg(LVL_INFO, "resume done\n");
+
+       return ret;
+}
+
+static const struct dev_pm_ops vpu_enc_pm_ops = {
+       SET_RUNTIME_PM_OPS(vpu_enc_runtime_suspend, vpu_enc_runtime_resume, NULL)
+       SET_SYSTEM_SLEEP_PM_OPS(vpu_enc_suspend, vpu_enc_resume)
+};
+
+static enum PLAT_TYPE supported_plat_types[PLAT_TYPE_RESERVED] = {
+       [IMX8QXP] = IMX8QXP,
+       [IMX8QM] = IMX8QM,
+};
+
+static const struct of_device_id vpu_enc_of_match[] = {
+       { .compatible = "nxp,imx8qm-b0-vpuenc",
+         .data = (void *)&supported_plat_types[IMX8QM]
+       },
+       { .compatible = "nxp,imx8qxp-b0-vpuenc",
+         .data = (void *)&supported_plat_types[IMX8QXP]
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, vpu_enc_of_match);
+
+static struct platform_driver vpu_enc_driver = {
+       .probe = vpu_enc_probe,
+       .remove = vpu_enc_remove,
+       .driver = {
+               .name = "vpu-b0-encoder",
+               .of_match_table = vpu_enc_of_match,
+               .pm = &vpu_enc_pm_ops,
+       },
+};
+module_platform_driver(vpu_enc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Linux VPU driver for Freescale i.MX/MXC");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(VPU_ENC_DRIVER_VERSION);
+
+module_param(vpu_dbg_level_encoder, uint, 0644);
+MODULE_PARM_DESC(vpu_dbg_level_encoder, "Debug level bitmask (see LVL_* flags)");
+
+module_param(reset_on_hang, int, 0644);
+MODULE_PARM_DESC(reset_on_hang, "reset on hang (0-1)");
+
+module_param(show_detail_index, int, 0644);
+MODULE_PARM_DESC(show_detail_index, "index of the instance to show detailed memory info for");
+
+module_param(debug_firmware_bitmap, long, 0644);
+MODULE_PARM_DESC(debug_firmware_bitmap, "firmware debug info switch");
+
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_b0.h b/drivers/mxc/vpu_windsor/vpu_encoder_b0.h
new file mode 100644 (file)
index 0000000..14aedd5
--- /dev/null
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018 NXP
+ */
+
+/*
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/*!
+ * @file vpu_encoder_b0.h
+ *
+ * @brief VPU ENCODER B0 definition
+ *
+ */
+#ifndef __VPU_ENCODER_B0_H__
+#define __VPU_ENCODER_B0_H__
+
+#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-v4l2.h>
+#include <soc/imx8/sc/svc/irq/api.h>
+#include <soc/imx8/sc/ipc.h>
+#include <soc/imx8/sc/sci.h>
+#include <linux/mx8_mu.h>
+#include <media/v4l2-event.h>
+#include "vpu_encoder_rpc.h"
+#include "vpu_encoder_config.h"
+
+               vpu_err("error: reserved-region of_parse_phandle error\n");
+
+#define v4l2_fh_to_ctx(__fh) \
+       container_of(__fh, struct vpu_ctx, fh)
+               vpu_err("error: reserved-region of_address_to_resource error\n");
+       container_of((__ctrl)->handler, struct vpu_ctx, ctrl_handler)
+
+#define VPU_ENC_MAX_CORE_NUM           2
+#define VPU_MAX_BUFFER                 32
+#define M0FW_FILENAME                  "vpu/vpu_fw_imx8_enc.bin"
+#define MMAP_BUF_TYPE_SHIFT            28
+#define MMAP_BUF_TYPE_MASK             0xF0000000
+#define M0_BOOT_SIZE_DEFAULT           0x1000000
+#define M0_BOOT_SIZE_MIN               0x100000
+#define RPC_SIZE_DEFAULT               0x80000
+#define RPC_SIZE_MIN                   0x20000
+#define PRINT_SIZE_DEFAULT             0x80000
+#define PRINT_SIZE_MIN                 0x20000
+#define STREAM_SIZE                    0x300000
+#define MU_B0_REG_CONTROL              (0x10000 + 0x24)
+
+#define MIN_BUFFER_COUNT               3
+#define BITRATE_COEF                   1024
+#define BITRATE_LOW_THRESHOLD          (16)
+#define BITRATE_HIGH_THRESHOLD         (240 * 1024)
+#define BITRATE_DEFAULT_TARGET         (2 * 1024)
+#define BITRATE_DEFAULT_PEAK           (8 * 1024)
+#define GOP_H_THRESHOLD                        300
+#define GOP_L_THRESHOLD                        1
+#define GOP_DEFAULT                    30
+#define BFRAMES_H_THRESHOLD            4
+#define BFRAMES_L_THRESHOLD            0
+#define BFRAMES_DEFAULT                        2
+#define QP_MAX                         51
+#define QP_MIN                         0
+#define QP_DEFAULT                     25
+
+#define VPU_DISABLE_BITS               0x7
+#define VPU_ENCODER_MASK               0x1
+
+#define ENCODER_NODE_NUMBER 13 //use /dev/video13 as encoder node
+struct vpu_v4l2_control {
+       uint32_t id;
+       enum v4l2_ctrl_type type;
+       uint32_t minimum;
+       uint32_t maximum;
+       uint32_t step;
+       uint32_t default_value;
+       uint32_t menu_skip_mask;
+       bool is_volatile;
+};
+
+typedef enum{
+       INIT_DONE = 1,
+       RPC_BUF_OFFSET,
+       BOOT_ADDRESS,
+       COMMAND,
+       EVENT
+} MSG_Type;
+
+enum PLAT_TYPE {
+       IMX8QXP = 0,
+       IMX8QM  = 1,
+       IMX8DM,
+       IMX8DX,
+       PLAT_TYPE_RESERVED
+};
+
+enum QUEUE_TYPE {
+       V4L2_SRC = 0,
+       V4L2_DST = 1,
+};
+
+enum vpu_video_standard {
+       VPU_VIDEO_UNDEFINED = 0,
+       VPU_VIDEO_AVC = 1,
+       VPU_VIDEO_VC1 = 2,
+       VPU_VIDEO_MPEG2 = 3,
+       VPU_VIDEO_AVS = 4,
+       VPU_VIDEO_ASP = 5,
+       VPU_VIDEO_JPEG = 6,
+       VPU_VIDEO_RV8 = 7,
+       VPU_VIDEO_RV9 = 8,
+       VPU_VIDEO_VP6 = 9,
+       VPU_VIDEO_SPK = 10,
+       VPU_VIDEO_VP8 = 11,
+       VPU_VIDEO_AVC_MVC = 12,
+       VPU_VIDEO_HEVC = 13,
+       VPU_VIDEO_VP9 = 14,
+};
+
+#define VPU_PIX_FMT_AVS         v4l2_fourcc('A', 'V', 'S', '0')
+#define VPU_PIX_FMT_ASP         v4l2_fourcc('A', 'S', 'P', '0')
+#define VPU_PIX_FMT_RV8         v4l2_fourcc('R', 'V', '8', '0')
+#define VPU_PIX_FMT_RV9         v4l2_fourcc('R', 'V', '9', '0')
+#define VPU_PIX_FMT_VP6         v4l2_fourcc('V', 'P', '6', '0')
+#define VPU_PIX_FMT_SPK         v4l2_fourcc('S', 'P', 'K', '0')
+#define VPU_PIX_FMT_HEVC        v4l2_fourcc('H', 'E', 'V', 'C')
+#define VPU_PIX_FMT_VP9         v4l2_fourcc('V', 'P', '9', '0')
+#define VPU_PIX_FMT_LOGO        v4l2_fourcc('L', 'O', 'G', 'O')
+
+#define VPU_PIX_FMT_TILED_8     v4l2_fourcc('Z', 'T', '0', '8')
+#define VPU_PIX_FMT_TILED_10    v4l2_fourcc('Z', 'T', '1', '0')
+
+enum vpu_pixel_format {
+       VPU_HAS_COLOCATED = 0x00000001,
+       VPU_HAS_SPLIT_FLD = 0x00000002,
+       VPU_PF_MASK       = ~(VPU_HAS_COLOCATED | VPU_HAS_SPLIT_FLD),
+
+       VPU_IS_TILED      = 0x00000100,
+       VPU_HAS_10BPP     = 0x00000200,
+
+       VPU_IS_PLANAR     = 0x00001000,
+       VPU_IS_SEMIPLANAR = 0x00002000,
+       VPU_IS_PACKED     = 0x00004000,
+
+       // Merged definitions using above flags:
+       VPU_PF_UNDEFINED  = 0,
+       VPU_PF_YUV420_SEMIPLANAR = 0x00010000 | VPU_IS_SEMIPLANAR,
+       VPU_PF_YUV420_PLANAR = 0x00020000 | VPU_IS_PLANAR,
+       VPU_PF_UYVY = 0x00040000 | VPU_IS_PACKED,
+       VPU_PF_TILED_8BPP = 0x00080000 | VPU_IS_TILED | VPU_IS_SEMIPLANAR,
+       VPU_PF_TILED_10BPP = 0x00100000 | VPU_IS_TILED | VPU_IS_SEMIPLANAR | VPU_HAS_10BPP,
+};
+
+struct vpu_ctx;
+struct core_device;
+struct vpu_dev;
+struct vpu_v4l2_fmt {
+       char *name;
+       unsigned int fourcc;
+       unsigned int num_planes;
+       unsigned int venc_std;
+       unsigned int is_yuv;
+};
+
+struct vb2_data_req {
+       struct list_head  list;
+       struct vb2_buffer *vb2_buf;
+       u_int32 sequence;
+       u_int32 buffer_flags;
+};
+
+enum ENC_RW_FLAG {
+       VPU_ENC_FLAG_WRITEABLE,
+       VPU_ENC_FLAG_READABLE
+};
+
+struct queue_data {
+       unsigned int width;
+       unsigned int height;
+       unsigned int sizeimage[VB2_MAX_PLANES];
+       struct v4l2_rect rect;
+       int buf_type; // v4l2_buf_type
+       bool vb2_q_inited;
+       struct vb2_queue vb2_q;    // vb2 queue
+       struct list_head drv_q;    // driver queue
+       struct semaphore drv_q_lock;
+       struct vb2_data_req vb2_reqs[VPU_MAX_BUFFER];
+       enum QUEUE_TYPE type;
+       struct vpu_v4l2_fmt *supported_fmts;
+       unsigned int fmt_count;
+       struct vpu_v4l2_fmt *current_fmt;
+       unsigned long rw_flag;
+       struct list_head frame_q;
+       atomic64_t frame_count;
+       struct list_head frame_idle;
+       struct vpu_ctx *ctx;
+       char desc[64];
+};
+
+struct vpu_strip_info {
+       unsigned long count;
+       unsigned long max;
+       unsigned long total;
+};
+
+struct vpu_fps_sts {
+       unsigned int thd;
+       unsigned int times;
+       unsigned long frame_number;
+       struct timespec ts;
+       unsigned long fps;
+};
+
+struct vpu_statistic {
+       unsigned long cmd[GTB_ENC_CMD_RESERVED + 1];
+       unsigned long event[VID_API_ENC_EVENT_RESERVED + 1];
+       unsigned long current_cmd;
+       unsigned long current_event;
+       struct timespec ts_cmd;
+       struct timespec ts_event;
+       unsigned long yuv_count;
+       unsigned long encoded_count;
+       unsigned long h264_count;
+       struct {
+               struct vpu_strip_info fw;
+               struct vpu_strip_info begin;
+               struct vpu_strip_info eos;
+       } strip_sts;
+       bool fps_sts_enable;
+       struct vpu_fps_sts fps[VPU_FPS_STS_CNT];
+       unsigned long timestamp_overwrite;
+};
+
+struct vpu_attr {
+       struct device_attribute dev_attr;
+       char name[64];
+       u32 index;
+       struct core_device *core;
+
+       pid_t pid;
+       pid_t tgid;
+
+       struct vpu_statistic statistic;
+       MEDIAIP_ENC_PARAM param;
+
+       unsigned long ts_start[2];
+       unsigned long msg_count;
+       atomic64_t total_dma_size;
+
+       bool created;
+};
+
+struct print_buf_desc {
+       u32 start_h_phy;
+       u32 start_h_vir;
+       u32 start_m;
+       u32 bytes;
+       u32 read;
+       u32 write;
+       char buffer[0];
+};
+
+struct core_device {
+       void *m0_p_fw_space_vir;
+       u_int32 m0_p_fw_space_phy;
+       u32 fw_buf_size;
+       u32 fw_actual_size;
+       void *m0_rpc_virt;
+       u_int32 m0_rpc_phy;
+       u32 rpc_buf_size;
+       u32 print_buf_size;
+       u32 rpc_actual_size;
+       struct print_buf_desc *print_buf;
+
+       struct mutex cmd_mutex;
+       bool fw_is_ready;
+       bool firmware_started;
+       struct completion start_cmp;
+       struct completion snap_done_cmp;
+       struct workqueue_struct *workqueue;
+       struct work_struct msg_work;
+       void __iomem *mu_base_virtaddr;
+       unsigned int vpu_mu_id;
+       int vpu_mu_init;
+
+       u32 supported_instance_count;
+       struct vpu_ctx *ctx[VID_API_NUM_STREAMS];
+       struct vpu_attr attr[VID_API_NUM_STREAMS];
+       struct shared_addr shared_mem;
+       u32 id;
+       u32 reg_base;
+       u32 reg_size;
+       u32 reg_csr_base;
+       u32 reg_csr_size;
+       int irq;
+       struct device *generic_dev;
+       struct vpu_dev *vdev;
+       bool snapshot;
+       bool suspend;
+       bool hang;
+       struct device_attribute core_attr;
+       char name[64];
+       unsigned long reset_times;
+};
+
+struct vpu_enc_mem_item {
+       struct list_head list;
+       void *virt_addr;
+       unsigned long phy_addr;
+       unsigned long size;
+       unsigned long offset;
+};
+
+struct vpu_enc_mem_info {
+       void *virt_addr;
+       unsigned long phy_addr;
+       unsigned long size;
+       unsigned long bytesused;
+       struct list_head memorys;
+       spinlock_t lock;
+};
+
+struct vpu_dev {
+       struct device *generic_dev;
+       struct v4l2_device v4l2_dev;
+       struct video_device *pvpu_encoder_dev;
+       struct platform_device *plat_dev;
+       struct clk *clk_m0;
+       u32 reg_vpu_base;
+       u32 reg_vpu_size;
+       u32 reg_rpc_system;
+       void __iomem *regs_base;
+       struct mutex dev_mutex;
+       struct core_device core_dev[VPU_ENC_MAX_CORE_NUM];
+       u_int32 plat_type;
+       u_int32 core_num;
+       bool hw_enable;
+
+       struct delayed_work watchdog;
+       u8 heartbeat;
+
+       struct {
+               u32 min_width;
+               u32 max_width;
+               u32 step_width;
+               u32 min_height;
+               u32 max_height;
+               u32 step_height;
+       } supported_size;
+       struct {
+               u32 min;
+               u32 max;
+               u32 step;
+       } supported_fps;
+       struct vpu_enc_mem_info reserved_mem;
+};
+
+struct buffer_addr {
+       void *virt_addr;
+       dma_addr_t phy_addr;
+       u_int32 size;
+};
+
+enum {
+       VPU_ENC_STATUS_INITIALIZED,
+       VPU_ENC_STATUS_OUTPUT_READY = 18,
+       VPU_ENC_STATUS_DATA_READY = 19,
+       VPU_ENC_STATUS_SNAPSHOT = 20,
+       VPU_ENC_STATUS_FORCE_RELEASE = 21,
+       VPU_ENC_STATUS_EOS_SEND = 22,
+       VPU_ENC_STATUS_START_SEND = 23,
+       VPU_ENC_STATUS_START_DONE = 24,
+       VPU_ENC_STATUS_STOP_REQ = 25,
+       VPU_ENC_STATUS_STOP_SEND = 26,
+       VPU_ENC_STATUS_STOP_DONE = 27,
+       VPU_ENC_STATUS_CLOSED = 28,
+       VPU_ENC_STATUS_CONFIGURED = 29,
+       VPU_ENC_STATUS_HANG = 30,
+       VPU_ENC_STATUS_KEY_FRAME = 31
+};
+
+struct vpu_ctx {
+       struct vpu_dev *dev;
+       struct v4l2_fh fh;
+
+       struct v4l2_ctrl_handler ctrl_handler;
+       bool ctrl_inited;
+
+       int str_index;
+       unsigned long status;
+       struct queue_data q_data[2];
+       struct mutex instance_mutex;
+       struct work_struct instance_work;
+       struct workqueue_struct *instance_wq;
+       bool ctx_released;
+       struct buffer_addr encoder_stream;
+       struct buffer_addr encFrame[MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES];
+       struct buffer_addr refFrame[MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES];
+       struct buffer_addr actFrame;
+       struct buffer_addr enc_buffer;
+       MEDIAIP_ENC_MEM_REQ_DATA mem_req;
+       struct core_device *core_dev;
+
+       struct completion stop_cmp;
+       bool power_status;
+
+       struct list_head msg_q;
+       struct list_head idle_q;
+
+       struct vpu_statistic sts;
+       unsigned int frozen_count;
+       u_int32 sequence;
+       s64 timestams[VPU_ENC_SEQ_CAPACITY];
+};
+
+#define LVL_ERR                (1 << 0)
+#define LVL_WARN       (1 << 1)
+#define LVL_ALL                (1 << 2)
+#define LVL_IRQ                (1 << 3)
+#define LVL_INFO       (1 << 4)
+#define LVL_CMD                (1 << 5)
+#define LVL_EVT                (1 << 6)
+#define LVL_DEBUG      (1 << 7)
+#define LVL_CTRL       (1 << 8)
+#define LVL_RPC                (1 << 9)
+#define LVL_MSG                (1 << 10)
+#define LVL_MEM                (1 << 11)
+#define LVL_BUF                (1 << 12)
+#define LVL_FRAME      (1 << 13)
+#define LVL_FUNC       (1 << 16)
+
+#ifndef TAG
+#define TAG    "[VPU Encoder]\t "
+#endif
+
+#define vpu_dbg(level, fmt, arg...) \
+       do { \
+               if ((vpu_dbg_level_encoder & (level)) || ((level) & LVL_ERR)) \
+                       pr_info(TAG""fmt, ## arg); \
+       } while (0)
+
+#define vpu_err(fmt, arg...)   vpu_dbg(LVL_ERR, fmt, ##arg)
+#define vpu_log_func()         vpu_dbg(LVL_FUNC, "%s()\n", __func__)
+
+u32 cpu_phy_to_mu(struct core_device *dev, u32 addr);
+struct vpu_attr *get_vpu_ctx_attr(struct vpu_ctx *ctx);
+struct vpu_ctx *get_vpu_attr_ctx(struct vpu_attr *attr);
+
+#ifndef VPU_SAFE_RELEASE
+#define VPU_SAFE_RELEASE(p, func)      \
+       do {\
+               if (p) {\
+                       func(p);\
+                       p = NULL;\
+               } \
+       } while (0)
+#endif
+
+#endif
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_config.h b/drivers/mxc/vpu_windsor/vpu_encoder_config.h
new file mode 100644 (file)
index 0000000..dee8403
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_encoder_config.h
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+#ifndef _VPU_ENCODER_CONFIG_H
+#define _VPU_ENCODER_CONFIG_H
+
+#define VPU_ENC_WIDTH_MAX              1920
+#define VPU_ENC_HEIGHT_MAX             1080
+#define VPU_ENC_WIDTH_MIN              64
+#define VPU_ENC_HEIGHT_MIN             48
+#define VPU_ENC_WIDTH_STEP             16
+#define VPU_ENC_HEIGHT_STEP            2
+#define VPU_ENC_FRAMERATE_MAX          120
+#define VPU_ENC_FRAMERATE_MIN          1
+#define VPU_ENC_FRAMERATE_STEP         1
+
+#define VPU_ENC_WIDTH_DEFAULT          1920
+#define VPU_ENC_HEIGHT_DEFAULT         1080
+#define VPU_ENC_FRAMERATE_DEFAULT      30
+
+#define VPU_MEM_PATTERN                        0x5a5a5a5a
+
+#define VPU_TAIL_SERACH_SIZE           16
+#define VPU_STRM_END_PATTERN           {0x0, 0x0, 0x1, 0xb}
+#define VPU_STRM_BEGIN_PATTERN         {0x0, 0x0, 0x1}
+
+#define MSG_DATA_DEFAULT_SIZE          256
+#define MSG_COUNT_THD                  16
+#define FRAME_COUNT_THD                        16
+
+#define VPU_WATCHDOG_INTERVAL_MS       1000
+#define VPU_ENC_HANG_THD               15
+
+#define VPU_FPS_STS_CNT                        3
+#define VPU_FPS_STS_THDS               {1, 3, 0}
+#define VPU_FPS_COEF                   100
+
+#define VPU_DETAIL_INDEX_DFT           0xffff
+
+#define VPU_MU_MAX_ADDRESS             0x40000000
+#define VPU_ENC_SEQ_CAPACITY           32
+#define VPU_ENC_INVALID_TIMESTAMP      0
+
+#endif
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_ctrl.c b/drivers/mxc/vpu_windsor/vpu_encoder_ctrl.c
new file mode 100644 (file)
index 0000000..37aee18
--- /dev/null
@@ -0,0 +1,609 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @file vpu_encoder_ctrl.c
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+
+#define TAG    "[VPU Encoder Ctrl]\t "
+#include <media/v4l2-ctrls.h>
+
+#include "vpu_encoder_b0.h"
+#include "vpu_encoder_ctrl.h"
+
+// H264 level is mapped like level 5.1 to uLevel 51, except level 1b to uLevel 14
+const u_int32 h264_level[] = {
+       [V4L2_MPEG_VIDEO_H264_LEVEL_1_0] = 10,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_1B]  = 14,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_1_1] = 11,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_1_2] = 12,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_1_3] = 13,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_2_0] = 20,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_2_1] = 21,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_2_2] = 22,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_3_0] = 30,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_3_1] = 31,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_3_2] = 32,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_4_0] = 40,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_4_1] = 41,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_4_2] = 42,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_5_0] = 50,
+       [V4L2_MPEG_VIDEO_H264_LEVEL_5_1] = 51
+};
+
+static int set_h264_profile(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       mutex_lock(&ctx->instance_mutex);
+       switch (ctrl->val) {
+       case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+               vpu_dbg(LVL_CTRL, "set h264 profile baseline\n");
+               param->eProfile = MEDIAIP_ENC_PROF_H264_BP;
+               break;
+       case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+               vpu_dbg(LVL_CTRL, "set h264 profile main\n");
+               param->eProfile = MEDIAIP_ENC_PROF_H264_MP;
+               break;
+       case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+               vpu_dbg(LVL_CTRL, "set h264 profile high\n");
+               param->eProfile = MEDIAIP_ENC_PROF_H264_HP;
+               break;
+       default:
+               vpu_err("not support H264 profile %d, set to main\n",
+                               ctrl->val);
+               param->eProfile = MEDIAIP_ENC_PROF_H264_MP;
+               break;
+       }
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_h264_level(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       mutex_lock(&ctx->instance_mutex);
+       param->uLevel = h264_level[ctrl->val];
+       mutex_unlock(&ctx->instance_mutex);
+
+       vpu_dbg(LVL_CTRL, "set h264 level to %d (%d)\n",
+                       ctrl->val, h264_level[ctrl->val]);
+
+       return 0;
+}
+
+static int set_bitrate_mode(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       mutex_lock(&ctx->instance_mutex);
+       switch (ctrl->val) {
+       case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
+               vpu_dbg(LVL_CTRL, "set bitrate mode VBR\n");
+               param->eBitRateMode =
+                               MEDIAIP_ENC_BITRATECONTROLMODE_CONSTANT_QP;
+               break;
+       case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
+               vpu_dbg(LVL_CTRL, "set bitrate mode CBR\n");
+               param->eBitRateMode = MEDIAIP_ENC_BITRATECONTROLMODE_CBR;
+               break;
+       default:
+               vpu_err("not support bitrate mode %d, set to cbr\n",
+                               ctrl->val);
+               param->eBitRateMode = MEDIAIP_ENC_BITRATECONTROLMODE_CBR;
+               break;
+       }
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
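+/*
+ * V4L2 bitrate controls are specified in bit/s; the firmware parameters
+ * are stored in units of BITRATE_COEF bit/s, hence the division below.
+ * Target and peak bitrates are kept consistent (peak >= target).
+ */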
+static int set_bitrate(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set bitrate %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uTargetBitrate = ctrl->val / BITRATE_COEF;
+       if (param->uMaxBitRate < param->uTargetBitrate)
+               param->uMaxBitRate = param->uTargetBitrate;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_bitrate_peak(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set peak bitrate %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uMaxBitRate = ctrl->val / BITRATE_COEF;
+       if (param->uTargetBitrate > param->uMaxBitRate)
+               param->uTargetBitrate = param->uMaxBitRate;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_gop_size(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set gop size %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uIFrameInterval = ctrl->val;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_i_period(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set iframe interval %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uIFrameInterval = ctrl->val;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int get_gop_size(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "get gop size\n");
+       ctrl->val = param->uIFrameInterval;
+
+       return 0;
+}
+
+static int set_b_frames(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set bframes %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uGopBLength = ctrl->val;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_qp(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set qp %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       param->uInitSliceQP = ctrl->val;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int get_min_buffers_for_output(struct v4l2_ctrl *ctrl)
+{
+       vpu_dbg(LVL_CTRL, "get min buffers for output\n");
+
+       ctrl->val = MIN_BUFFER_COUNT;
+
+       return 0;
+}
+
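+/*
+ * The H264 ASO control is used here to drive the firmware's low-latency
+ * mode: a non-zero value sets uLowLatencyMode, zero clears it.
+ */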
+static int set_display_re_ordering(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+       pMEDIAIP_ENC_PARAM  param = &attr->param;
+
+       vpu_dbg(LVL_CTRL, "set lowlatencymode %d\n", ctrl->val);
+       mutex_lock(&ctx->instance_mutex);
+       if (ctrl->val)
+               param->uLowLatencyMode = 1;
+       else
+               param->uLowLatencyMode = 0;
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+static int set_force_key_frame(struct v4l2_ctrl *ctrl)
+{
+       struct vpu_ctx *ctx = v4l2_ctrl_to_ctx(ctrl);
+
+       vpu_dbg(LVL_CTRL, "force key frame\n");
+       set_bit(VPU_ENC_STATUS_KEY_FRAME, &ctx->status);
+
+       return 0;
+}
+
+static int add_ctrl_h264_profile(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_h264_profile_ops = {
+               .s_ctrl = set_h264_profile,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler,
+                                     &ctrl_h264_profile_ops,
+                                     V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+                                     V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+                                     0xa,
+                                     V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
+       if (!ctrl) {
+               vpu_err("add ctrl h264 profile fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_h264_level(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_h264_level_ops = {
+               .s_ctrl = set_h264_level,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler,
+                                     &ctrl_h264_level_ops,
+                                     V4L2_CID_MPEG_VIDEO_H264_LEVEL,
+                                     V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
+                                     0x0,
+                                     V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
+       if (!ctrl) {
+               vpu_err("add ctrl h264 level fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_bitrate_mode(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_bitrate_mode_ops = {
+               .s_ctrl = set_bitrate_mode,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler,
+                                     &ctrl_bitrate_mode_ops,
+                                     V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+                                     V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
+                                     0x0,
+                                     V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
+       if (!ctrl) {
+               vpu_err("add ctrl bitrate mode fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_bitrate(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_bitrate_ops = {
+               .s_ctrl = set_bitrate,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_bitrate_ops,
+                                V4L2_CID_MPEG_VIDEO_BITRATE,
+                                BITRATE_LOW_THRESHOLD * BITRATE_COEF,
+                                BITRATE_HIGH_THRESHOLD * BITRATE_COEF,
+                                BITRATE_COEF,
+                                BITRATE_DEFAULT_TARGET * BITRATE_COEF);
+       if (!ctrl) {
+               vpu_err("add ctrl bitrate fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_bitrate_peak(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_bitrate_ops = {
+               .s_ctrl = set_bitrate_peak,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_bitrate_ops,
+                                V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+                                BITRATE_LOW_THRESHOLD * BITRATE_COEF,
+                                BITRATE_HIGH_THRESHOLD * BITRATE_COEF,
+                                BITRATE_COEF,
+                                BITRATE_DEFAULT_PEAK * BITRATE_COEF);
+       if (!ctrl) {
+               vpu_err("add ctrl bitrate peak fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_gop_size(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_gop_ops = {
+               .s_ctrl = set_gop_size,
+               .g_volatile_ctrl = get_gop_size,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_gop_ops,
+                                V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+                                GOP_L_THRESHOLD,
+                                GOP_H_THRESHOLD,
+                                1,
+                                GOP_DEFAULT);
+       if (!ctrl) {
+               vpu_err("add ctrl gop size fail\n");
+               return -EINVAL;
+       }
+
+       ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+       ctrl->flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+
+       return 0;
+}
+
+static int add_ctrl_i_period(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_i_period_ops = {
+               .s_ctrl = set_i_period,
+               .g_volatile_ctrl = get_gop_size,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                       &ctrl_i_period_ops,
+                       V4L2_CID_MPEG_VIDEO_H264_I_PERIOD,
+                       GOP_L_THRESHOLD,
+                       GOP_H_THRESHOLD,
+                       1,
+                       GOP_DEFAULT);
+
+       if (!ctrl) {
+               vpu_err("add ctrl i period fail\n");
+               return -EINVAL;
+       }
+
+       ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+       ctrl->flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
+
+       return 0;
+}
+
+static int add_ctrl_b_frames(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_b_frames = {
+               .s_ctrl = set_b_frames,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                       &ctrl_b_frames,
+                       V4L2_CID_MPEG_VIDEO_B_FRAMES,
+                       BFRAMES_L_THRESHOLD,
+                       BFRAMES_H_THRESHOLD,
+                       1,
+                       BFRAMES_DEFAULT);
+
+       if (!ctrl) {
+               vpu_err("add ctrl b frames fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_i_frame_qp(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_iframe_qp_ops = {
+               .s_ctrl = set_qp,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_iframe_qp_ops,
+                                V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP,
+                                QP_MIN,
+                                QP_MAX,
+                                1,
+                                QP_DEFAULT);
+       if (!ctrl) {
+               vpu_err("add ctrl h264 I frame qp fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_p_frame_qp(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_pframe_qp_ops = {
+               .s_ctrl = set_qp,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_pframe_qp_ops,
+                                V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP,
+                                QP_MIN,
+                                QP_MAX,
+                                1,
+                                QP_DEFAULT);
+       if (!ctrl) {
+               vpu_err("add ctrl h264 P frame qp fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_b_frame_qp(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_bframe_qp_ops = {
+               .s_ctrl = set_qp,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_bframe_qp_ops,
+                                V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP,
+                                QP_MIN,
+                                QP_MAX,
+                                1,
+                                QP_DEFAULT);
+       if (!ctrl) {
+               vpu_err("add ctrl h264 B frame qp fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_min_buffers_for_output(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops ctrl_min_buffers_ops = {
+               .g_volatile_ctrl = get_min_buffers_for_output,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                                &ctrl_min_buffers_ops,
+                                V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
+                                1,
+                                32,
+                                1,
+                                MIN_BUFFER_COUNT);
+       if (!ctrl) {
+               vpu_err("add ctrl min buffers for output fail\n");
+               return -EINVAL;
+       }
+
+       ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+       return 0;
+}
+
+static int add_ctrl_display_re_ordering(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops re_ordering_ops = {
+               .s_ctrl = set_display_re_ordering,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                       &re_ordering_ops,
+                       V4L2_CID_MPEG_VIDEO_H264_ASO,
+                       0, 1, 1, 1);
+       if (!ctrl) {
+               vpu_err("add ctrl display re ordering fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int add_ctrl_force_key_frame(struct vpu_ctx *ctx)
+{
+       static const struct v4l2_ctrl_ops force_key_frame_ops = {
+               .s_ctrl = set_force_key_frame,
+       };
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+                       &force_key_frame_ops,
+                       V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME,
+                       0, 0, 0, 0);
+       if (!ctrl) {
+               vpu_err("add ctrl force key frame fail\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
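+/*
+ * Register all encoder V4L2 controls. Individual failures are recorded in
+ * ctrl_handler.error and checked by vpu_enc_setup_ctrls() afterwards.
+ */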
+static int vpu_enc_register_ctrls(struct vpu_ctx *ctx)
+{
+       add_ctrl_h264_profile(ctx);
+       add_ctrl_h264_level(ctx);
+       add_ctrl_bitrate_mode(ctx);
+       add_ctrl_bitrate(ctx);
+       add_ctrl_bitrate_peak(ctx);
+       add_ctrl_gop_size(ctx);
+       add_ctrl_i_period(ctx);
+       add_ctrl_b_frames(ctx);
+       add_ctrl_i_frame_qp(ctx);
+       add_ctrl_p_frame_qp(ctx);
+       add_ctrl_b_frame_qp(ctx);
+       add_ctrl_min_buffers_for_output(ctx);
+       add_ctrl_display_re_ordering(ctx);
+       add_ctrl_force_key_frame(ctx);
+
+       return 0;
+}
+
+int vpu_enc_setup_ctrls(struct vpu_ctx *ctx)
+{
+       vpu_log_func();
+
+       v4l2_ctrl_handler_init(&ctx->ctrl_handler, 11);
+       vpu_enc_register_ctrls(ctx);
+       if (ctx->ctrl_handler.error) {
+               vpu_err("control initialization error (%d)\n",
+                       ctx->ctrl_handler.error);
+               return -EINVAL;
+       }
+       ctx->ctrl_inited = true;
+       return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+int vpu_enc_free_ctrls(struct vpu_ctx *ctx)
+{
+       vpu_log_func();
+
+       if (ctx->ctrl_inited) {
+               v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+               ctx->ctrl_inited = false;
+       }
+
+       return 0;
+}
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_ctrl.h b/drivers/mxc/vpu_windsor/vpu_encoder_ctrl.h
new file mode 100644 (file)
index 0000000..5fc9818
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_encoder_ctrl.h
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+#ifndef _VPU_ENCODER_CTRL_H
+#define _VPU_ENCODER_CTRL_H
+
+#include "mediasys_types.h"
+
+int vpu_enc_setup_ctrls(struct vpu_ctx *ctx);
+int vpu_enc_free_ctrls(struct vpu_ctx *ctx);
+
+#endif
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_mem.c b/drivers/mxc/vpu_windsor/vpu_encoder_mem.c
new file mode 100644 (file)
index 0000000..ab8d09d
--- /dev/null
@@ -0,0 +1,619 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_encoder_mem.c
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+
+#define TAG    "[VPU Encoder Mem]\t "
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include "vpu_encoder_config.h"
+#include "vpu_encoder_b0.h"
+#include "vpu_encoder_mem.h"
+
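+/*
+ * Map the reserved memory region write-combined and initialise the
+ * bookkeeping used by the simple first-fit allocator below.
+ */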
+int vpu_enc_init_reserved_memory(struct vpu_enc_mem_info *info)
+{
+       if (!info || !info->phy_addr || !info->size)
+               return -EINVAL;
+
+       info->virt_addr = ioremap_wc(info->phy_addr, info->size);
+       if (!info->virt_addr)
+               return -EINVAL;
+       memset_io(info->virt_addr, 0, info->size);
+       info->bytesused = 0;
+       INIT_LIST_HEAD(&info->memorys);
+       spin_lock_init(&info->lock);
+
+       return 0;
+}
+
+void vpu_enc_release_reserved_memory(struct vpu_enc_mem_info *info)
+{
+       struct vpu_enc_mem_item *item = NULL;
+       struct vpu_enc_mem_item *tmp = NULL;
+
+       if (!info)
+               return;
+
+       spin_lock(&info->lock);
+       list_for_each_entry_safe(item, tmp, &info->memorys, list) {
+               list_del_init(&item->list);
+               info->bytesused -= item->size;
+               vpu_dbg(LVL_MEM, "free reserved memory %ld\n", item->size);
+               VPU_SAFE_RELEASE(item, vfree);
+       }
+       spin_unlock(&info->lock);
+
+       if (info->virt_addr) {
+               iounmap(info->virt_addr);
+               info->virt_addr = NULL;
+       }
+}
+
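+/*
+ * First-fit allocation from the reserved region: walk the offset-ordered
+ * item list, take the first gap large enough for buffer->size and insert
+ * a new item there. Returns -ENOMEM when no gap fits.
+ */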
+int vpu_enc_alloc_reserved_mem(struct vpu_enc_mem_info *info,
+                               struct buffer_addr *buffer)
+{
+       struct vpu_enc_mem_item *item = NULL;
+       struct list_head *pos = NULL;
+       unsigned long offset = 0;
+       int ret;
+
+       if (!info || !buffer)
+               return -EINVAL;
+
+       spin_lock(&info->lock);
+       if (buffer->size + info->bytesused > info->size) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+
+       list_for_each_entry(item, &info->memorys, list) {
+               if (item->offset - offset >= buffer->size) {
+                       pos = &item->list;
+                       break;
+               }
+               offset = item->offset + item->size;
+       }
+       if (!pos && info->size - offset >= buffer->size)
+               pos = &info->memorys;
+       if (!pos) {
+               ret = -ENOMEM;
+               goto exit;
+       }
+       item = vzalloc(sizeof(*item));
+       if (!item) {
+		ret = -ENOMEM;
+               goto exit;
+       }
+       item->offset = offset;
+       item->virt_addr = info->virt_addr + offset;
+       item->phy_addr = info->phy_addr + offset;
+       item->size = buffer->size;
+       list_add_tail(&item->list, pos);
+       info->bytesused += buffer->size;
+       vpu_dbg(LVL_MEM, "alloc reserved memory <0x%lx 0x%lx(%ld)>\n",
+                       item->phy_addr, item->size, item->size);
+       buffer->virt_addr = item->virt_addr;
+       buffer->phy_addr = item->phy_addr;
+       ret = 0;
+exit:
+       spin_unlock(&info->lock);
+       return ret;
+}
+
+int vpu_enc_free_reserved_mem(struct vpu_enc_mem_info *info,
+                               struct buffer_addr *buffer)
+{
+       struct vpu_enc_mem_item *item = NULL;
+       struct vpu_enc_mem_item *tmp = NULL;
+       unsigned long offset;
+       int ret = -EINVAL;
+
+       if (!info || !buffer)
+               return -EINVAL;
+       if (!buffer->virt_addr)
+               return 0;
+
+       if (buffer->phy_addr < info->phy_addr) {
+               vpu_err("invalid reserved memory addr : 0x%llx %d\n",
+                               buffer->phy_addr, buffer->size);
+               return -EINVAL;
+       }
+
+       offset = buffer->phy_addr - info->phy_addr;
+       if (offset + buffer->size > info->size) {
+               vpu_err("invalid reserved memory addr : 0x%llx %d\n",
+                               buffer->phy_addr, buffer->size);
+               return -EINVAL;
+       }
+
+       spin_lock(&info->lock);
+       list_for_each_entry_safe(item, tmp, &info->memorys, list) {
+               if (offset < item->offset)
+                       continue;
+               if (offset + buffer->size > item->offset + item->size)
+                       continue;
+               list_del_init(&item->list);
+               info->bytesused -= item->size;
+               vpu_dbg(LVL_MEM, "free reserved memory <0x%lx 0x%lx(%ld)>\n",
+                       item->phy_addr, item->size, item->size);
+               VPU_SAFE_RELEASE(item, vfree);
+               ret = 0;
+               break;
+       }
+       spin_unlock(&info->lock);
+
+       return ret;
+}
+
+void vpu_enc_add_dma_size(struct vpu_attr *attr, unsigned long size)
+{
+       if (!attr)
+               return;
+
+       atomic64_add(size, &attr->total_dma_size);
+}
+
+void vpu_enc_sub_dma_size(struct vpu_attr *attr, unsigned long size)
+{
+       if (!attr)
+               return;
+
+       atomic64_sub(size, &attr->total_dma_size);
+}
+
+int vpu_enc_alloc_dma_buffer(struct vpu_ctx *ctx, struct buffer_addr *buffer)
+{
+       if (!ctx || !ctx->dev || !buffer || !buffer->size)
+               return -EINVAL;
+
+       vpu_dbg(LVL_MEM, "alloc coherent dma %d\n", buffer->size);
+       buffer->virt_addr = dma_alloc_coherent(ctx->dev->generic_dev,
+                                               buffer->size,
+                                               (dma_addr_t *)&buffer->phy_addr,
+                                               GFP_KERNEL | GFP_DMA32);
+       if (!buffer->virt_addr) {
+               vpu_err("encoder alloc coherent dma(%d) fail\n",
+                               buffer->size);
+               return -ENOMEM;
+       }
+       memset_io(buffer->virt_addr, 0, buffer->size);
+       vpu_enc_add_dma_size(get_vpu_ctx_attr(ctx), buffer->size);
+
+       return 0;
+}
+
+void vpu_enc_init_dma_buffer(struct buffer_addr *buffer)
+{
+       if (!buffer)
+               return;
+
+       buffer->virt_addr = NULL;
+       buffer->phy_addr = 0;
+       buffer->size = 0;
+}
+
+int vpu_enc_free_dma_buffer(struct vpu_ctx *ctx, struct buffer_addr *buffer)
+{
+       if (!ctx || !ctx->dev || !buffer)
+               return -EINVAL;
+
+       if (!buffer->virt_addr)
+               return 0;
+
+       vpu_dbg(LVL_MEM, "free coherent dma %d\n", buffer->size);
+       vpu_enc_sub_dma_size(get_vpu_ctx_attr(ctx), buffer->size);
+       dma_free_coherent(ctx->dev->generic_dev, buffer->size,
+                               buffer->virt_addr, buffer->phy_addr);
+
+       vpu_enc_init_dma_buffer(buffer);
+
+       return 0;
+}
+
+static bool check_mem_resource_is_valid(MEDIAIP_ENC_MEM_RESOURCE *resource)
+{
+       if (!resource)
+               return false;
+       if (resource->uMemVirtAddr >= VPU_MU_MAX_ADDRESS)
+               return false;
+       if (resource->uMemVirtAddr + resource->uMemSize > VPU_MU_MAX_ADDRESS)
+               return false;
+       return true;
+}
+
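+/*
+ * Round the firmware's requested size up to a page, keeping at least one
+ * extra u32 beyond the requested size for the overrun guard pattern.
+ */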
+static u32 get_enc_alloc_size(u32 size)
+{
+       u32 esize = ALIGN(size, PAGE_SIZE);
+
+       if (esize < size + sizeof(u32))
+               esize += PAGE_SIZE;
+
+       return esize;
+}
+
+static int alloc_mem_res(struct vpu_ctx *ctx, struct buffer_addr *buffer,
+                       MEDIAIP_ENC_MEM_RESOURCE *resource, u32 size)
+{
+       int ret;
+
+       if (!ctx || !buffer || !resource)
+               return -EINVAL;
+
+       if (!size) {
+               vpu_err("invalid memory resource size : %d\n", size);
+               return -EINVAL;
+       }
+
+       buffer->size = get_enc_alloc_size(size);
+       ret = vpu_enc_alloc_dma_buffer(ctx, buffer);
+       if (ret)
+               return ret;
+
+       resource->uMemPhysAddr = buffer->phy_addr;
+       resource->uMemVirtAddr = cpu_phy_to_mu(ctx->core_dev, buffer->phy_addr);
+       resource->uMemSize = size;
+
+       return 0;
+}
+
+static int free_mem_res(struct vpu_ctx *ctx, struct buffer_addr *buffer,
+                       MEDIAIP_ENC_MEM_RESOURCE *resource)
+{
+       if (!ctx || !buffer || !resource)
+               return -EINVAL;
+
+       vpu_enc_free_dma_buffer(ctx, buffer);
+
+       resource->uMemPhysAddr = 0;
+       resource->uMemVirtAddr = 0;
+       resource->uMemSize = 0;
+
+       return 0;
+}
+
+static int alloc_reserved_mem_res(struct vpu_ctx *ctx,
+                               struct buffer_addr *buffer,
+                               MEDIAIP_ENC_MEM_RESOURCE *resource,
+                               u32 size)
+{
+       int ret;
+
+       if (!ctx || !ctx->dev || !buffer || !resource)
+               return -EINVAL;
+
+       if (!size) {
+               vpu_err("invalid memory resource size : %d\n", size);
+               return -EINVAL;
+       }
+
+       buffer->size = get_enc_alloc_size(size);
+       ret = vpu_enc_alloc_reserved_mem(&ctx->dev->reserved_mem, buffer);
+       if (ret)
+               return ret;
+
+       resource->uMemPhysAddr = buffer->phy_addr;
+       resource->uMemVirtAddr = cpu_phy_to_mu(ctx->core_dev, buffer->phy_addr);
+       resource->uMemSize = size;
+
+       return 0;
+}
+
+static int free_reserved_mem_res(struct vpu_ctx *ctx,
+                               struct buffer_addr *buffer,
+                               MEDIAIP_ENC_MEM_RESOURCE *resource)
+{
+       if (!ctx || !ctx->dev || !buffer || !resource)
+               return -EINVAL;
+
+       vpu_enc_free_reserved_mem(&ctx->dev->reserved_mem, buffer);
+
+       resource->uMemPhysAddr = 0;
+       resource->uMemVirtAddr = 0;
+       resource->uMemSize = 0;
+
+       return 0;
+}
+
+static int free_enc_frames(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int i;
+
+       vpu_log_func();
+       for (i = 0; i < ctx->mem_req.uEncFrmNum; i++)
+               free_mem_res(ctx, &ctx->encFrame[i],
+                               &pool->tEncFrameBuffers[i]);
+
+       return 0;
+}
+
+static int alloc_enc_frames(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int i;
+       int ret;
+
+       vpu_log_func();
+       for (i = 0; i < ctx->mem_req.uEncFrmNum; i++) {
+               ret = alloc_mem_res(ctx,
+                               &ctx->encFrame[i],
+                               &pool->tEncFrameBuffers[i],
+                               ctx->mem_req.uEncFrmSize);
+               if (ret) {
+                       vpu_err("alloc enc frame[%d] fail\n", i);
+                       goto error;
+               }
+               vpu_dbg(LVL_MEM, "encFrame[%d]: 0x%llx,%d(%d)\n", i,
+                               ctx->encFrame[i].phy_addr,
+                               ctx->mem_req.uEncFrmSize,
+                               ctx->encFrame[i].size);
+       }
+
+       return 0;
+error:
+       free_enc_frames(ctx, pool);
+       return ret;
+}
+
+static int free_ref_frames(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int i;
+
+       vpu_log_func();
+       for (i = 0; i < ctx->mem_req.uRefFrmNum; i++)
+               free_mem_res(ctx, &ctx->refFrame[i],
+                               &pool->tRefFrameBuffers[i]);
+
+       return 0;
+}
+
+static int alloc_ref_frames(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int i;
+       int ret;
+
+       vpu_log_func();
+       for (i = 0; i < ctx->mem_req.uRefFrmNum; i++) {
+               ret = alloc_mem_res(ctx,
+                               &ctx->refFrame[i],
+                               &pool->tRefFrameBuffers[i],
+                               ctx->mem_req.uRefFrmSize);
+               if (ret) {
+                       vpu_err("alloc ref frame[%d] fail\n", i);
+                       goto error;
+               }
+               vpu_dbg(LVL_MEM, "refFrame[%d]: 0x%llx,%d(%d)\n", i,
+                               ctx->refFrame[i].phy_addr,
+                               ctx->mem_req.uRefFrmSize,
+                               ctx->refFrame[i].size);
+       }
+
+       return 0;
+error:
+       free_ref_frames(ctx, pool);
+       return ret;
+}
+
+static int free_act_frame(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       if (!ctx || !pool)
+               return -EINVAL;
+
+       vpu_log_func();
+       free_reserved_mem_res(ctx, &ctx->actFrame, &pool->tActFrameBufferArea);
+
+       return 0;
+}
+
+static int alloc_act_frame(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int ret = 0;
+
+       vpu_log_func();
+       ret = alloc_reserved_mem_res(ctx,
+                       &ctx->actFrame,
+                       &pool->tActFrameBufferArea,
+                       ctx->mem_req.uActBufSize);
+       if (ret) {
+               vpu_err("alloc act frame fail\n");
+               return ret;
+       }
+
+       if (!check_mem_resource_is_valid(&pool->tActFrameBufferArea)) {
+               vpu_err("invalid actFrames address, 0x%x, 0x%x, 0x%x\n",
+                               pool->tActFrameBufferArea.uMemPhysAddr,
+                               pool->tActFrameBufferArea.uMemVirtAddr,
+                               pool->tActFrameBufferArea.uMemSize);
+               free_act_frame(ctx, pool);
+               return -EINVAL;
+       }
+
+       vpu_dbg(LVL_MEM, "actFrame: 0x%llx, %d(%d)\n",
+                       ctx->actFrame.phy_addr,
+                       ctx->mem_req.uActBufSize,
+                       ctx->actFrame.size);
+       return 0;
+}
+
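+/*
+ * Guard-pattern helpers: a VPU_MEM_PATTERN word is written just past the
+ * size requested by the firmware for each enc/ref/act buffer, and later
+ * compared to detect out-of-bounds writes.
+ */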
+static void set_mem_pattern(u32 *ptr)
+{
+       if (!ptr)
+               return;
+       *ptr = VPU_MEM_PATTERN;
+}
+
+static int check_mem_pattern(u32 *ptr)
+{
+       if (!ptr)
+               return -EINVAL;
+
+       if (*ptr != VPU_MEM_PATTERN)
+               return -EINVAL;
+
+       return 0;
+}
+
+static void vpu_enc_set_mem_pattern(struct vpu_ctx *ctx)
+{
+       int i;
+
+       if (!ctx)
+               return;
+
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES; i++) {
+               if (!ctx->encFrame[i].virt_addr)
+                       continue;
+               set_mem_pattern(ctx->encFrame[i].virt_addr +
+                               ctx->mem_req.uEncFrmSize);
+       }
+
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES; i++) {
+               if (!ctx->refFrame[i].virt_addr)
+                       continue;
+               set_mem_pattern(ctx->refFrame[i].virt_addr +
+                               ctx->mem_req.uRefFrmSize);
+       }
+
+       if (ctx->actFrame.virt_addr)
+               set_mem_pattern(ctx->actFrame.virt_addr +
+                               ctx->mem_req.uActBufSize);
+}
+
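+/*
+ * Verify the guard words behind every firmware buffer; log any overrun and
+ * re-arm the patterns so subsequent checks start from a clean state.
+ */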
+int vpu_enc_check_mem_overstep(struct vpu_ctx *ctx)
+{
+       int i;
+       int ret;
+       int flag = 0;
+
+       if (!ctx)
+               return -EINVAL;
+
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_SRC_FRAMES; i++) {
+               if (!ctx->encFrame[i].virt_addr)
+                       continue;
+               ret = check_mem_pattern(ctx->encFrame[i].virt_addr +
+                                       ctx->mem_req.uEncFrmSize);
+               if (ret) {
+                       vpu_err("***error:[%d][%d]encFrame[%d] out of bounds\n",
+                                       ctx->core_dev->id, ctx->str_index, i);
+                       flag = 1;
+               }
+       }
+
+       for (i = 0; i < MEDIAIP_MAX_NUM_WINDSOR_REF_FRAMES; i++) {
+               if (!ctx->refFrame[i].virt_addr)
+                       continue;
+               ret = check_mem_pattern(ctx->refFrame[i].virt_addr +
+                                       ctx->mem_req.uRefFrmSize);
+               if (ret) {
+                       vpu_err("***error:[%d][%d]refFrame[%d] out of bounds\n",
+                                       ctx->core_dev->id, ctx->str_index, i);
+                       flag = 1;
+               }
+       }
+
+       if (ctx->actFrame.virt_addr) {
+               ret = check_mem_pattern(ctx->actFrame.virt_addr +
+                                       ctx->mem_req.uActBufSize);
+               if (ret) {
+                       vpu_err("***error:[%d][%d]actFrame out of bounds\n",
+                                       ctx->core_dev->id, ctx->str_index);
+                       flag = 1;
+               }
+       }
+
+       if (flag) {
+               vpu_err("Error:Memory out of bounds in [%d][%d]\n",
+                       ctx->core_dev->id, ctx->str_index);
+               vpu_enc_set_mem_pattern(ctx);
+       }
+
+       return 0;
+}
+
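+/*
+ * (Re)allocate the firmware memory pool lazily: buffers are only freed and
+ * reallocated when the new requirement exceeds what is already allocated.
+ */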
+int vpu_enc_alloc_mem(struct vpu_ctx *ctx,
+                       MEDIAIP_ENC_MEM_REQ_DATA *req_data,
+                       pMEDIAIP_ENC_MEM_POOL pool)
+{
+       int ret;
+
+       if (!ctx || !req_data || !pool)
+               return -EINVAL;
+
+       if (ctx->mem_req.uEncFrmSize < req_data->uEncFrmSize ||
+                       ctx->mem_req.uEncFrmNum < req_data->uEncFrmNum) {
+               free_enc_frames(ctx, pool);
+               ctx->mem_req.uEncFrmSize = req_data->uEncFrmSize;
+               ctx->mem_req.uEncFrmNum = req_data->uEncFrmNum;
+               ret = alloc_enc_frames(ctx, pool);
+               if (ret)
+                       return ret;
+       }
+
+       if (ctx->mem_req.uRefFrmSize < req_data->uRefFrmSize ||
+                       ctx->mem_req.uRefFrmNum < req_data->uRefFrmNum) {
+               free_ref_frames(ctx, pool);
+               ctx->mem_req.uRefFrmSize = req_data->uRefFrmSize;
+               ctx->mem_req.uRefFrmNum = req_data->uRefFrmNum;
+               ret = alloc_ref_frames(ctx, pool);
+               if (ret)
+                       goto error_alloc_refs;
+       }
+
+       if (ctx->mem_req.uActBufSize < req_data->uActBufSize) {
+               free_act_frame(ctx, pool);
+               ctx->mem_req.uActBufSize = req_data->uActBufSize;
+               ret = alloc_act_frame(ctx, pool);
+               if (ret)
+                       goto error_alloc_act;
+       }
+
+       vpu_enc_set_mem_pattern(ctx);
+
+       return 0;
+error_alloc_act:
+       free_ref_frames(ctx, pool);
+error_alloc_refs:
+       free_enc_frames(ctx, pool);
+       return ret;
+}
+
+int vpu_enc_free_mem(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool)
+{
+       if (!ctx || !pool)
+               return -EINVAL;
+
+       free_act_frame(ctx, pool);
+       free_ref_frames(ctx, pool);
+       free_enc_frames(ctx, pool);
+
+       return 0;
+}
+
+int vpu_enc_alloc_stream(struct vpu_ctx *ctx)
+{
+       int ret;
+
+       if (ctx->encoder_stream.virt_addr)
+               return 0;
+
+       ctx->encoder_stream.size = STREAM_SIZE;
+       ret = vpu_enc_alloc_dma_buffer(ctx, &ctx->encoder_stream);
+       if (ret) {
+               vpu_err("alloc encoder stream buffer fail\n");
+               return -ENOMEM;
+       }
+       vpu_dbg(LVL_MEM, "encoder_stream: 0x%llx, %d\n",
+                       ctx->encoder_stream.phy_addr, ctx->encoder_stream.size);
+
+       return 0;
+}
+
+void vpu_enc_free_stream(struct vpu_ctx *ctx)
+{
+       vpu_enc_free_dma_buffer(ctx, &ctx->encoder_stream);
+}
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_mem.h b/drivers/mxc/vpu_windsor/vpu_encoder_mem.h
new file mode 100644 (file)
index 0000000..2423943
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_encoder_mem.h
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+#ifndef _VPU_ENCODER_MEM_H
+#define _VPU_ENCODER_MEM_H
+
+#include "vpu_encoder_b0.h"
+
+int vpu_enc_init_reserved_memory(struct vpu_enc_mem_info *info);
+void vpu_enc_release_reserved_memory(struct vpu_enc_mem_info *info);
+void vpu_enc_add_dma_size(struct vpu_attr *attr, unsigned long size);
+void vpu_enc_sub_dma_size(struct vpu_attr *attr, unsigned long size);
+int vpu_enc_alloc_dma_buffer(struct vpu_ctx *ctx, struct buffer_addr *buffer);
+int vpu_enc_free_dma_buffer(struct vpu_ctx *ctx, struct buffer_addr *buffer);
+void vpu_enc_init_dma_buffer(struct buffer_addr *buffer);
+int vpu_enc_check_mem_overstep(struct vpu_ctx *ctx);
+int vpu_enc_alloc_mem(struct vpu_ctx *ctx,
+                       MEDIAIP_ENC_MEM_REQ_DATA *req_data,
+                       pMEDIAIP_ENC_MEM_POOL pool);
+int vpu_enc_free_mem(struct vpu_ctx *ctx, pMEDIAIP_ENC_MEM_POOL pool);
+int vpu_enc_alloc_stream(struct vpu_ctx *ctx);
+void vpu_enc_free_stream(struct vpu_ctx *ctx);
+
+#endif
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_rpc.c b/drivers/mxc/vpu_windsor/vpu_encoder_rpc.c
new file mode 100644 (file)
index 0000000..f670a94
--- /dev/null
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include "vpu_encoder_rpc.h"
+
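+/*
+ * Carve up the shared RPC region: the host interface header is followed by
+ * the command ring (CMD_SIZE), the message ring (MSG_SIZE) and, per stream,
+ * a control interface plus its descriptor/parameter/status blocks. The
+ * number of bytes laid out is reported back through actual_size.
+ */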
+void rpc_init_shared_memory_encoder(struct shared_addr *This,
+               unsigned long long base_phy_addr,
+               void *base_virt_addr,
+               u_int32 total_size,
+               u32 *actual_size)
+{
+       pENC_RPC_HOST_IFACE pSharedInterface;
+       unsigned int phy_addr;
+       unsigned int i;
+       unsigned int temp_addr;
+       BUFFER_DESCRIPTOR_TYPE *pSharedCmdBufDescPtr;
+       BUFFER_DESCRIPTOR_TYPE *pSharedMsgBufDescPtr;
+       pMEDIA_ENC_API_CONTROL_INTERFACE pEncCtrlInterface;
+
+       This->shared_mem_phy = base_phy_addr;
+       This->shared_mem_vir = base_virt_addr;
+       This->base_offset = (unsigned long long)(base_virt_addr - base_phy_addr);
+
+       pSharedInterface = (pENC_RPC_HOST_IFACE)This->shared_mem_vir;
+       This->pSharedInterface = pSharedInterface;
+
+       pSharedInterface->FwExecBaseAddr = base_phy_addr;
+       pSharedInterface->FwExecAreaSize = total_size;
+
+       pSharedCmdBufDescPtr = (BUFFER_DESCRIPTOR_TYPE *)&pSharedInterface->StreamCmdBufferDesc;
+       pSharedMsgBufDescPtr = (BUFFER_DESCRIPTOR_TYPE *)&pSharedInterface->StreamMsgBufferDesc;
+
+       phy_addr = base_phy_addr + sizeof(ENC_RPC_HOST_IFACE);
+       This->cmd_mem_phy = phy_addr;
+       This->cmd_mem_vir = This->shared_mem_vir + sizeof(ENC_RPC_HOST_IFACE);
+
+       pSharedCmdBufDescPtr->wptr = phy_addr;
+       pSharedCmdBufDescPtr->rptr = pSharedCmdBufDescPtr->wptr;
+       pSharedCmdBufDescPtr->start = pSharedCmdBufDescPtr->wptr;
+       pSharedCmdBufDescPtr->end = pSharedCmdBufDescPtr->start + CMD_SIZE;
+
+       phy_addr += CMD_SIZE;
+       This->msg_mem_phy = phy_addr;
+       This->msg_mem_vir = This->cmd_mem_vir + CMD_SIZE;
+
+       pSharedMsgBufDescPtr->wptr = phy_addr;
+       pSharedMsgBufDescPtr->rptr = pSharedMsgBufDescPtr->wptr;
+       pSharedMsgBufDescPtr->start = pSharedMsgBufDescPtr->wptr;
+       pSharedMsgBufDescPtr->end = pSharedMsgBufDescPtr->start + MSG_SIZE;
+
+       phy_addr += MSG_SIZE;
+
+       for (i = 0; i < VID_API_NUM_STREAMS; i++) {
+               pSharedInterface->pEncCtrlInterface[i] = phy_addr;
+               phy_addr += sizeof(MEDIA_ENC_API_CONTROL_INTERFACE);
+       }
+
+       for (i = 0; i < VID_API_NUM_STREAMS; i++) {
+               temp_addr = pSharedInterface->pEncCtrlInterface[i];
+               pEncCtrlInterface = (pMEDIA_ENC_API_CONTROL_INTERFACE)(temp_addr + This->base_offset);
+               pEncCtrlInterface->pEncYUVBufferDesc = phy_addr;
+               phy_addr += sizeof(MEDIAIP_ENC_YUV_BUFFER_DESC);
+               pEncCtrlInterface->pEncStreamBufferDesc = phy_addr;
+               phy_addr += sizeof(BUFFER_DESCRIPTOR_TYPE);
+               pEncCtrlInterface->pEncExpertModeParam = phy_addr;
+               phy_addr += sizeof(MEDIAIP_ENC_EXPERT_MODE_PARAM);
+               pEncCtrlInterface->pEncParam = phy_addr;
+               phy_addr += sizeof(MEDIAIP_ENC_PARAM);
+               pEncCtrlInterface->pEncMemPool = phy_addr;
+               phy_addr += sizeof(MEDIAIP_ENC_MEM_POOL);
+               pEncCtrlInterface->pEncEncodingStatus = phy_addr;
+               phy_addr += sizeof(ENC_ENCODING_STATUS);
+               pEncCtrlInterface->pEncDSAStatus = phy_addr;
+               phy_addr += sizeof(ENC_DSA_STATUS_t);
+       }
+       if (actual_size)
+               *actual_size = phy_addr - base_phy_addr;
+}
+
+void rpc_set_system_cfg_value_encoder(void *Interface, u_int32 regs_base, u_int32 core_id)
+{
+       pENC_RPC_HOST_IFACE pSharedInterface;
+       MEDIAIP_FW_SYSTEM_CONFIG *pSystemCfg;
+
+       pSharedInterface = (pENC_RPC_HOST_IFACE)Interface;
+       pSystemCfg = &pSharedInterface->sSystemCfg;
+       pSystemCfg->uNumWindsors = 1;
+       pSystemCfg->uWindsorIrqPin[0x0][0x0] = 0x4; // PAL_IRQ_WINDSOR_LOW
+       pSystemCfg->uWindsorIrqPin[0x0][0x1] = 0x5; // PAL_IRQ_WINDSOR_HI
+       pSystemCfg->uMaloneBaseAddress[0] = (unsigned int)(regs_base + 0x180000);
+       if (core_id == 0)
+               pSystemCfg->uWindsorBaseAddress[0] = (unsigned int)(regs_base + 0x800000);
+       else
+               pSystemCfg->uWindsorBaseAddress[0] = (unsigned int)(regs_base + 0xa00000);
+       pSystemCfg->uMaloneBaseAddress[0x1] = 0x0;
+       pSystemCfg->uHifOffset[0x0] = 0x1C000;
+       pSystemCfg->uHifOffset[0x1] = 0x0;
+
+       pSystemCfg->uDPVBaseAddr = 0x0;
+       pSystemCfg->uDPVIrqPin = 0x0;
+       pSystemCfg->uPixIfBaseAddr = (unsigned int)(regs_base + 0x180000 + 0x20000);
+       pSystemCfg->uFSLCacheBaseAddr[0] = (unsigned int)(regs_base + 0x60000);
+       pSystemCfg->uFSLCacheBaseAddr[1] = (unsigned int)(regs_base + 0x68000);
+}
+
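+/*
+ * Compute how much space is available in a ring buffer descriptor: used
+ * bytes when reading (bFull TRUE), free bytes when writing (bFull FALSE).
+ * *puUpdateAddress receives the pointer advanced by uSize, wrapped to the
+ * start of the ring when it passes the end.
+ */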
+u_int32 rpc_MediaIPFW_Video_buffer_space_check_encoder(BUFFER_DESCRIPTOR_TYPE *pBufDesc,
+       BOOL bFull,
+       u_int32 uSize,
+       u_int32 *puUpdateAddress)
+{
+       u_int32 uPtr1;
+       u_int32 uPtr2;
+       u_int32 start;
+       u_int32 end;
+       u_int32 uTemp;
+
+	/* bFull is FALSE when sending a message (writing data)    */
+	/* bFull is TRUE when processing commands (reading data)   */
+       uPtr1 = (bFull) ? pBufDesc->rptr : pBufDesc->wptr;
+       uPtr2 = (bFull) ? pBufDesc->wptr : pBufDesc->rptr;
+
+       if (uPtr1 == uPtr2) {
+               if (bFull)
+                       /* No data at all to read */
+                       return 0;
+               else {
+			/* The write pointer equals the read pointer, so  */
+			/* the buffer is completely empty for writing.     */
+                       start = pBufDesc->start;
+                       end   = pBufDesc->end;
+                       /* The address to be returned in this case is for */
+                       /* the updated write pointer.                     */
+                       uTemp = uPtr1 + uSize;
+                       if (uTemp >= end)
+                               uTemp += (start - end);
+                       *puUpdateAddress = uTemp;
+                       return (end - start);
+               }
+       } else if (uPtr1 < uPtr2) {
+		/* Return the updated read pointer address.          */
+		/* If the requested size was too big, the external   */
+		/* function is expected to compare it against the    */
+		/* space returned.                                   */
+               *puUpdateAddress = uPtr1 + uSize;
+               return (uPtr2 - uPtr1);
+       }
+       /* We know the system has looped!! */
+       start = pBufDesc->start;
+       end   = pBufDesc->end;
+       uTemp  = uPtr1 + uSize;
+       if (uTemp >= end)
+               uTemp += (start - end);
+       *puUpdateAddress = uTemp;
+       return ((end - uPtr1) + (uPtr2 - start));
+}
+
+static void rpc_update_cmd_buffer_ptr_encoder(BUFFER_DESCRIPTOR_TYPE *pCmdDesc)
+{
+       u_int32 uWritePtr;
+
+       /*avoid sw reset fail*/
+	/* avoid sw reset fail */
+       uWritePtr = pCmdDesc->wptr + 4;
+       if (uWritePtr >= pCmdDesc->end)
+               uWritePtr = pCmdDesc->start;
+       pCmdDesc->wptr = uWritePtr;
+}
+
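+/*
+ * Queue a command for the firmware: the header word packs the stream index
+ * (bits 31:24), the number of data words (23:16) and the command id (13:0),
+ * and is followed by cmdnum data words, each advancing the write pointer.
+ */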
+void rpc_send_cmd_buf_encoder(struct shared_addr *This,
+               u_int32 idx,
+               u_int32 cmdid,
+               u_int32 cmdnum,
+               u_int32 *local_cmddata)
+{
+       pENC_RPC_HOST_IFACE pSharedInterface = (pENC_RPC_HOST_IFACE)This->shared_mem_vir;
+       BUFFER_DESCRIPTOR_TYPE *pCmdDesc = &pSharedInterface->StreamCmdBufferDesc;
+       u_int32 *cmddata;
+       u_int32 i;
+       u_int32 *cmdword = (u_int32 *)(This->cmd_mem_vir+pCmdDesc->wptr - pCmdDesc->start);
+
+       *cmdword = 0;
+       *cmdword |= ((idx & 0x000000ff) << 24);
+       *cmdword |= ((cmdnum & 0x000000ff) << 16);
+       *cmdword |= ((cmdid & 0x00003fff) << 0);
+       rpc_update_cmd_buffer_ptr_encoder(pCmdDesc);
+
+       for (i = 0; i < cmdnum; i++) {
+               cmddata = (u_int32 *)(This->cmd_mem_vir+pCmdDesc->wptr - pCmdDesc->start);
+               *cmddata = local_cmddata[i];
+               rpc_update_cmd_buffer_ptr_encoder(pCmdDesc);
+       }
+}
+
+u_int32 rpc_MediaIPFW_Video_message_check_encoder(struct shared_addr *This)
+{
+       u_int32 uSpace;
+       u_int32 uIgnore;
+       pENC_RPC_HOST_IFACE pSharedInterface = (pENC_RPC_HOST_IFACE)This->shared_mem_vir;
+       BUFFER_DESCRIPTOR_TYPE *pMsgDesc = &pSharedInterface->StreamMsgBufferDesc;
+       u_int32 msgword;
+       u_int32 msgnum;
+
+       uSpace = rpc_MediaIPFW_Video_buffer_space_check_encoder(pMsgDesc, TRUE, 0, &uIgnore);
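+       /* convert the available space from bytes to 32-bit words */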
+       uSpace = (uSpace >> 2);
+       if (uSpace) {
+               /* get the current message header word */
+               msgword = *((u_int32 *)(This->msg_mem_vir + pMsgDesc->rptr - pMsgDesc->start));
+               /* find the number of additional data words */
+               msgnum = ((msgword & 0x00ff0000) >> 16);
+
+               /*
+                * Check the number of message words against
+                * 1) a limit - some sort of maximum, or at least the size of
+                *    the SW buffer the message is read into
+                * 2) the space reported (write ptr - read ptr, in 32-bit words)
+                * It must be less than the space (as opposed to <=) because
+                * the header word itself is not counted in msgnum.
+                */
+               if (msgnum < VID_API_MESSAGE_LIMIT) {
+                       if (msgnum < uSpace)
+                               return API_MSG_AVAILABLE;
+                       else
+                               return API_MSG_INCOMPLETE;
+               } else
+                       return API_MSG_BUFFER_ERROR;
+       }
+       return API_MSG_UNAVAILABLE;
+}
+
+static void rpc_update_msg_buffer_ptr_encoder(BUFFER_DESCRIPTOR_TYPE *pMsgDesc)
+{
+       u_int32 uReadPtr;
+
+       uReadPtr = pMsgDesc->rptr + 4;
+       if (uReadPtr >= pMsgDesc->end)
+               uReadPtr = pMsgDesc->start;
+       pMsgDesc->rptr = uReadPtr;
+}
+
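+/*
+ * Read one 32-bit word from the message buffer at the current read pointer
+ * and advance the read pointer (wrapping at the end of the buffer).
+ */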
+u32 rpc_read_msg_u32(struct shared_addr *shared_mem)
+{
+       u32 msgword;
+       u32 *ptr = NULL;
+       pENC_RPC_HOST_IFACE iface = NULL;
+       BUFFER_DESCRIPTOR_TYPE *msg_buf = NULL;
+
+       if (!shared_mem)
+               return 0;
+
+       iface = shared_mem->pSharedInterface;
+       msg_buf = &iface->StreamMsgBufferDesc;
+       ptr = shared_mem->msg_mem_vir + msg_buf->rptr - msg_buf->start;
+       rpc_update_msg_buffer_ptr_encoder(msg_buf);
+       msgword = *ptr;
+
+       return msgword;
+}
+
+int rpc_read_msg_array(struct shared_addr *shared_mem, u32 *buf, u32 number)
+{
+       int i;
+       u32 val;
+
+       if (!shared_mem)
+               return -EINVAL;
+
+       for (i = 0; i < number; i++) {
+               val = rpc_read_msg_u32(shared_mem);
+               if (buf)
+                       buf[i] = val;
+       }
+
+       return 0;
+}
+
+int rpc_get_msg_header(struct shared_addr *shared_mem, struct msg_header *msg)
+{
+       u32 msgword;
+
+       if (!shared_mem || !msg)
+               return -EINVAL;
+
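+       /*
+        * The message header word mirrors the command word layout:
+        *   bits [31:24] - instance index
+        *   bits [23:16] - number of data words that follow
+        *   bits [13:0]  - message id
+        */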
+       msgword = rpc_read_msg_u32(shared_mem);
+       msg->idx = ((msgword & 0xff000000) >> 24);
+       msg->msgnum = ((msgword & 0x00ff0000) >> 16);
+       msg->msgid = ((msgword & 0x00003fff) >> 0);
+
+       return 0;
+}
+
+static void *phy_to_virt(u_int32 src, unsigned long long offset)
+{
+       void *result;
+
+       result = (void *)(src + offset);
+       return result;
+}
+
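+/*
+ * Fetch a member of the per-instance encoder control interface. The shared
+ * memory stores firmware-visible physical addresses, so both the control
+ * interface pointer and the requested member are converted to kernel
+ * virtual addresses with phy_to_virt() and the mapping's base_offset.
+ */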
+#define GET_CTRL_INTERFACE_MEMBER(shared_mem, index, name, member) \
+       do {\
+               pENC_RPC_HOST_IFACE iface = shared_mem->pSharedInterface; \
+               pMEDIA_ENC_API_CONTROL_INTERFACE ctrl_interface =\
+                       phy_to_virt(iface->pEncCtrlInterface[index],\
+                                       shared_mem->base_offset);\
+               name = phy_to_virt(ctrl_interface->member,\
+                               shared_mem->base_offset);\
+       } while (0)
+
+pMEDIAIP_ENC_YUV_BUFFER_DESC rpc_get_yuv_buffer_desc(
+               struct shared_addr *shared_mem, int index)
+{
+       pMEDIAIP_ENC_YUV_BUFFER_DESC desc = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index, desc, pEncYUVBufferDesc);
+
+       return desc;
+}
+
+pBUFFER_DESCRIPTOR_TYPE rpc_get_stream_buffer_desc(
+               struct shared_addr *shared_mem, int index)
+{
+       pBUFFER_DESCRIPTOR_TYPE desc = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index,
+                               desc, pEncStreamBufferDesc);
+
+       return desc;
+}
+
+pMEDIAIP_ENC_EXPERT_MODE_PARAM rpc_get_expert_mode_param(
+               struct shared_addr *shared_mem, int index)
+{
+       pMEDIAIP_ENC_EXPERT_MODE_PARAM param = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index,
+                               param, pEncExpertModeParam);
+
+       return param;
+}
+
+pMEDIAIP_ENC_PARAM rpc_get_enc_param(
+               struct shared_addr *shared_mem, int index)
+{
+       pMEDIAIP_ENC_PARAM param = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index, param, pEncParam);
+
+       return param;
+}
+
+pMEDIAIP_ENC_MEM_POOL rpc_get_mem_pool(
+               struct shared_addr *shared_mem, int index)
+{
+       pMEDIAIP_ENC_MEM_POOL pool = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index, pool, pEncMemPool);
+
+       return pool;
+}
+
+pENC_ENCODING_STATUS rpc_get_encoding_status(
+               struct shared_addr *shared_mem, int index)
+{
+       pENC_ENCODING_STATUS encoding_status = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index,
+                               encoding_status, pEncEncodingStatus);
+
+       return encoding_status;
+}
+
+pENC_DSA_STATUS_t rpc_get_dsa_status(struct shared_addr *shared_mem, int index)
+{
+       pENC_DSA_STATUS_t dsa_status = NULL;
+
+       GET_CTRL_INTERFACE_MEMBER(shared_mem, index, dsa_status, pEncDSAStatus);
+
+       return dsa_status;
+}
+
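+/*
+ * Point the firmware's debug/print ring buffer at a host-supplied physical
+ * buffer and reset its read/write pointers.
+ */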
+void rpc_set_print_buffer(struct shared_addr *shared_mem,
+                               unsigned long print_phy_addr, u32 size)
+{
+       pENC_RPC_HOST_IFACE pSharedInterface;
+       pBUFFER_DESCRIPTOR_TYPE debugBufDesc;
+
+       pSharedInterface = shared_mem->pSharedInterface;
+       debugBufDesc = &pSharedInterface->DebugBufferDesc;
+
+       debugBufDesc->start = print_phy_addr;
+       debugBufDesc->end = debugBufDesc->start + size;
+       debugBufDesc->wptr = debugBufDesc->rptr = debugBufDesc->start;
+}
diff --git a/drivers/mxc/vpu_windsor/vpu_encoder_rpc.h b/drivers/mxc/vpu_windsor/vpu_encoder_rpc.h
new file mode 100644 (file)
index 0000000..934b542
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __VPU_ENCODER_RPC_H__
+#define __VPU_ENCODER_RPC_H__
+
+#include "mediasys_types.h"
+
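+/*
+ * Sizes of the sub-buffers carved out of the shared RPC memory (assumed to
+ * be in bytes); rpc_init_shared_memory_encoder() is expected to use these
+ * when laying out the command, message and config regions.
+ */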
+#define CMD_SIZE 2560
+#define MSG_SIZE 25600
+#define CODEC_SIZE 0x1000
+#define JPEG_SIZE 0x1000
+#define SEQ_SIZE 0x1000
+#define GOP_SIZE 0x1000
+#define PIC_SIZE 0x1000
+#define QMETER_SIZE 0x1000
+#define DEBUG_SIZE 0x1000
+#define ENG_SIZE 0x1000
+#define LOCAL_MSG_NUM VID_API_MESSAGE_LIMIT
+
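+/*
+ * Host-side view of the memory shared with the encoder firmware: each
+ * region is tracked by its physical address (as seen by the firmware) and
+ * its kernel virtual mapping, plus the base offset used to translate
+ * between the two.
+ */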
+struct shared_addr {
+       pENC_RPC_HOST_IFACE pSharedInterface;
+       unsigned long long shared_mem_phy;
+       void *shared_mem_vir;
+       unsigned long long cmd_mem_phy;
+       void *cmd_mem_vir;
+       unsigned long long msg_mem_phy;
+       void *msg_mem_vir;
+       unsigned long long codec_mem_phy;
+       void *codec_mem_vir;
+       unsigned long long jpeg_mem_phy;
+       void *jpeg_mem_vir;
+       unsigned long long seq_mem_phy;
+       void *seq_mem_vir;
+       unsigned long long pic_mem_phy;
+       void *pic_mem_vir;
+       unsigned long long gop_mem_phy;
+       void *gop_mem_vir;
+       unsigned long long qmeter_mem_phy;
+       void *qmeter_mem_vir;
+       unsigned long long base_offset;
+};
+
+struct msg_header {
+       u32 idx;
+       u32 msgnum;
+       u32 msgid;
+};
+
+void rpc_init_shared_memory_encoder(struct shared_addr *This,
+               unsigned long long base_phy_addr,
+               void *base_virt_addr,
+               u_int32 total_size,
+               u32 *actual_size);
+void rpc_set_system_cfg_value_encoder(void *Interface, u_int32 regs_base, u_int32 core_id);
+void rpc_send_cmd_buf_encoder(struct shared_addr *This,
+               u_int32 idx,
+               u_int32 cmdid,
+               u_int32 cmdnum,
+               u_int32 *local_cmddata);
+u32 rpc_read_msg_u32(struct shared_addr *shared_mem);
+int rpc_read_msg_array(struct shared_addr *shared_mem, u32 *buf, u32 number);
+int rpc_get_msg_header(struct shared_addr *shared_mem, struct msg_header *msg);
+
+pMEDIAIP_ENC_YUV_BUFFER_DESC rpc_get_yuv_buffer_desc(
+               struct shared_addr *shared_mem, int index);
+pBUFFER_DESCRIPTOR_TYPE rpc_get_stream_buffer_desc(
+               struct shared_addr *shared_mem, int index);
+pMEDIAIP_ENC_EXPERT_MODE_PARAM rpc_get_expert_mode_param(
+               struct shared_addr *shared_mem, int index);
+pMEDIAIP_ENC_PARAM rpc_get_enc_param(
+               struct shared_addr *shared_mem, int index);
+pMEDIAIP_ENC_MEM_POOL rpc_get_mem_pool(
+               struct shared_addr *shared_mem, int index);
+pENC_ENCODING_STATUS rpc_get_encoding_status(
+               struct shared_addr *shared_mem, int index);
+pENC_DSA_STATUS_t rpc_get_dsa_status(struct shared_addr *shared_mem, int index);
+void rpc_set_print_buffer(struct shared_addr *shared_mem,
+                               unsigned long print_phy_addr, u32 size);
+
+#endif
diff --git a/drivers/mxc/vpu_windsor/vpu_event_msg.c b/drivers/mxc/vpu_windsor/vpu_event_msg.c
new file mode 100644 (file)
index 0000000..7ac7acc
--- /dev/null
@@ -0,0 +1,237 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_event_msg.c
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+#define TAG    "[VPU Encoder Msg]\t "
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include "vpu_encoder_b0.h"
+#include "vpu_event_msg.h"
+
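+/* total number of ext_data words currently allocated, for debug accounting */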
+static atomic64_t total_ext_data = ATOMIC64_INIT(0);
+
+static struct vpu_event_msg *alloc_event_msg(void)
+{
+       struct vpu_event_msg *msg = NULL;
+
+       msg = vzalloc(sizeof(*msg));
+
+       return msg;
+}
+
+static void free_event_msg(struct vpu_event_msg *msg)
+{
+       if (!msg)
+               return;
+
+       free_msg_ext_buffer(msg);
+       VPU_SAFE_RELEASE(msg, vfree);
+}
+
+static void set_msg_count(struct vpu_ctx *ctx, unsigned long count)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+       if (attr)
+               attr->msg_count = count;
+}
+
+static void inc_msg_count(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+       if (attr)
+               attr->msg_count++;
+}
+
+static void dec_msg_count(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+       if (attr)
+               attr->msg_count--;
+}
+
+static bool is_msg_count_full(struct vpu_ctx *ctx)
+{
+       struct vpu_attr *attr = get_vpu_ctx_attr(ctx);
+
+       if (!attr)
+               return false;
+       if (attr->msg_count > MSG_COUNT_THD)
+               return true;
+       return false;
+}
+
+void cleanup_ctx_msg_queue(struct vpu_ctx *ctx)
+{
+       struct vpu_event_msg *msg;
+       struct vpu_event_msg *tmp;
+
+       WARN_ON(!ctx);
+
+       vpu_log_func();
+       mutex_lock(&ctx->instance_mutex);
+       list_for_each_entry_safe(msg, tmp, &ctx->msg_q, list) {
+               list_del_init(&msg->list);
+               vpu_dbg(LVL_MSG, "drop core[%d] ctx[%d] msg:[%d]\n",
+                               ctx->core_dev->id, ctx->str_index, msg->msgid);
+               VPU_SAFE_RELEASE(msg, free_event_msg);
+               dec_msg_count(ctx);
+       }
+
+       list_for_each_entry_safe(msg, tmp, &ctx->idle_q, list) {
+               list_del_init(&msg->list);
+               VPU_SAFE_RELEASE(msg, free_event_msg);
+               dec_msg_count(ctx);
+       }
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+static int increase_idle_msg(struct vpu_ctx *ctx, u32 count)
+{
+       int i;
+
+       for (i = 0; i < count; i++) {
+               struct vpu_event_msg *msg = alloc_event_msg();
+
+               if (!msg)
+                       continue;
+               list_add_tail(&msg->list, &ctx->idle_q);
+               inc_msg_count(ctx);
+       }
+
+       return 0;
+}
+
+int init_ctx_msg_queue(struct vpu_ctx *ctx)
+{
+       WARN_ON(!ctx);
+       if (!ctx)
+               return -EINVAL;
+
+       vpu_log_func();
+       mutex_lock(&ctx->instance_mutex);
+
+       set_msg_count(ctx, 0);
+       INIT_LIST_HEAD(&ctx->msg_q);
+       INIT_LIST_HEAD(&ctx->idle_q);
+
+       mutex_unlock(&ctx->instance_mutex);
+
+       return 0;
+}
+
+struct vpu_event_msg *get_idle_msg(struct vpu_ctx *ctx)
+{
+       struct vpu_event_msg *msg = NULL;
+
+       WARN_ON(!ctx);
+
+       mutex_lock(&ctx->instance_mutex);
+       if (list_empty(&ctx->idle_q))
+               increase_idle_msg(ctx, 1);
+
+       /* the idle queue may still be empty if the allocation failed */
+       if (!list_empty(&ctx->idle_q)) {
+               msg = list_first_entry(&ctx->idle_q, struct vpu_event_msg, list);
+               list_del_init(&msg->list);
+       }
+
+       mutex_unlock(&ctx->instance_mutex);
+
+       return msg;
+}
+
+void put_idle_msg(struct vpu_ctx *ctx, struct vpu_event_msg *msg)
+{
+       WARN_ON(!ctx);
+
+       if (!ctx || !msg)
+               return;
+
+       free_msg_ext_buffer(msg);
+
+       mutex_lock(&ctx->instance_mutex);
+       if (is_msg_count_full(ctx)) {
+               VPU_SAFE_RELEASE(msg, free_event_msg);
+               dec_msg_count(ctx);
+       } else {
+               list_add_tail(&msg->list, &ctx->idle_q);
+       }
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+struct vpu_event_msg *pop_event_msg(struct vpu_ctx *ctx)
+{
+       struct vpu_event_msg *msg = NULL;
+
+       WARN_ON(!ctx);
+
+       mutex_lock(&ctx->instance_mutex);
+       if (list_empty(&ctx->msg_q))
+               goto exit;
+
+       msg = list_first_entry(&ctx->msg_q, struct vpu_event_msg, list);
+       if (msg)
+               list_del_init(&msg->list);
+
+exit:
+       mutex_unlock(&ctx->instance_mutex);
+       return msg;
+}
+
+void push_back_event_msg(struct vpu_ctx *ctx, struct vpu_event_msg *msg)
+{
+       WARN_ON(!ctx);
+
+       if (!ctx || !msg)
+               return;
+
+       mutex_lock(&ctx->instance_mutex);
+       list_add_tail(&msg->list, &ctx->msg_q);
+       mutex_unlock(&ctx->instance_mutex);
+}
+
+int alloc_msg_ext_buffer(struct vpu_event_msg *msg, u32 number)
+{
+       WARN_ON(!msg);
+
+       if (!msg || !number)
+               return -EINVAL;
+
+       msg->ext_data = vzalloc(number * sizeof(u32));
+       if (!msg->ext_data)
+               return -ENOMEM;
+       msg->number = number;
+
+       atomic64_add(number, &total_ext_data);
+       vpu_dbg(LVL_MSG, "++++alloc %d msg ext data: %lld\n",
+                       number, get_total_ext_data_number());
+
+       return 0;
+}
+
+void free_msg_ext_buffer(struct vpu_event_msg *msg)
+{
+       WARN_ON(!msg);
+
+       if (!msg || !msg->ext_data)
+               return;
+
+       atomic64_sub(msg->number, &total_ext_data);
+       VPU_SAFE_RELEASE(msg->ext_data, vfree);
+       vpu_dbg(LVL_MSG, "----free %d msg ext data: %lld\n",
+                       msg->number, get_total_ext_data_number());
+}
+
+long long get_total_ext_data_number(void)
+{
+       return atomic64_read(&total_ext_data);
+}
diff --git a/drivers/mxc/vpu_windsor/vpu_event_msg.h b/drivers/mxc/vpu_windsor/vpu_event_msg.h
new file mode 100644 (file)
index 0000000..cc02956
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright(c) 2018 NXP. All rights reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * vpu_event_msg.h
+ *
+ * Author Ming Qian<ming.qian@nxp.com>
+ */
+#ifndef _VPU_EVENT_MSG_H
+#define _VPU_EVENT_MSG_H
+
+#include "vpu_encoder_config.h"
+
+struct vpu_event_msg {
+       struct list_head list;
+       u32 idx;
+       u32 msgid;
+       u32 number;
+       u32 data[MSG_DATA_DEFAULT_SIZE];
+       u32 *ext_data;
+};
+
+int init_ctx_msg_queue(struct vpu_ctx *ctx);
+void cleanup_ctx_msg_queue(struct vpu_ctx *ctx);
+struct vpu_event_msg *get_idle_msg(struct vpu_ctx *ctx);
+void put_idle_msg(struct vpu_ctx *ctx, struct vpu_event_msg *msg);
+struct vpu_event_msg *pop_event_msg(struct vpu_ctx *ctx);
+void push_back_event_msg(struct vpu_ctx *ctx, struct vpu_event_msg *msg);
+int alloc_msg_ext_buffer(struct vpu_event_msg *msg, u32 number);
+void free_msg_ext_buffer(struct vpu_event_msg *msg);
+long long get_total_ext_data_number(void);
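+
+/*
+ * Typical usage (a sketch; the call sites are expected to live in
+ * vpu_encoder_b0.c): a producer takes a free message with get_idle_msg(),
+ * optionally attaches extra payload via alloc_msg_ext_buffer(), and queues
+ * it with push_back_event_msg(); a consumer drains it with pop_event_msg()
+ * and returns it to the idle pool with put_idle_msg().
+ */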
+
+#endif