/*
* drivers/amlogic/amports/vh264.c
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/amlogic/media/utils/amstream.h>
#include <linux/amlogic/media/frame_sync/ptsserv.h>
#include <linux/amlogic/media/canvas/canvas.h>
#include <linux/amlogic/media/vfm/vframe.h>
#include <linux/amlogic/media/vfm/vframe_provider.h>
#include <linux/amlogic/media/vfm/vframe_receiver.h>
#include <linux/amlogic/media/utils/vformat.h>
#include <linux/amlogic/media/frame_sync/tsync.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include "../../../stream_input/amports/amports_priv.h"
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include "../../decoder/utils/vdec_input.h"
//#include <linux/amlogic/tee.h>
#include <uapi/linux/tee.h>
#include <linux/sched/clock.h>
#include <linux/amlogic/media/utils/vdec_reg.h>
#include "../../decoder/utils/vdec.h"
#include "../../decoder/utils/amvdec.h"
#include "vh264.h"
#include "../../../stream_input/amports/streambuf.h"
#include <linux/delay.h>
#include <linux/amlogic/media/codec_mm/configs.h>
#include "../../decoder/utils/decoder_mmu_box.h"
#include "../../decoder/utils/decoder_bmmu_box.h"
#include "../../decoder/utils/firmware.h"
#include <linux/uaccess.h>
#include "../../decoder/utils/config_parser.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
#include "../../decoder/utils/vdec_v4l2_buffer_ops.h"
#include <linux/crc32.h>
#include <media/v4l2-mem2mem.h>
#include "../../decoder/utils/vdec_feature.h"
#define DETECT_WRONG_MULTI_SLICE
/*
 * To enable DV in frame mode,
 * #define DOLBY_META_SUPPORT in the ucode.
 */
#undef pr_info
#define pr_info printk
#define VDEC_DW
#define DEBUG_UCODE
#define MEM_NAME "codec_m264"
#define MULTI_INSTANCE_FRAMEWORK
/* #define ONE_COLOCATE_BUF_PER_DECODE_BUF */
#include "h264_dpb.h"
/* #define SEND_PARAM_WITH_REG */
#define DRIVER_NAME "ammvdec_h264_v4l"
#define DRIVER_HEADER_NAME "ammvdec_h264_header"
#define CHECK_INTERVAL (HZ/100)
#define SEI_DATA_SIZE (8*1024)
#define SEI_ITU_DATA_SIZE (4*1024)
#define RATE_MEASURE_NUM 8
#define RATE_CORRECTION_THRESHOLD 5
#define RATE_2397_FPS 4004 /* 23.97 */
#define RATE_25_FPS 3840 /* 25 */
#define RATE_2997_FPS 3203 /* 29.97 */
#define RATE_5994_FPS 1601 /* 59.94 */
#define RATE_11990_FPS 800 /* 119.90 */
#define DUR2PTS(x) ((x)*90/96)
#define PTS2DUR(x) ((x)*96/90)
#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96)
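/*
 * Frame duration is kept in 1/96000 s units (see the RATE_* values above)
 * while PTS runs at 90 kHz, hence the x*90/96 scaling. Worked example:
 * a 25 fps stream has frame_dur = RATE_25_FPS = 3840 and
 * DUR2PTS(3840) = 3840*90/96 = 3600, i.e. exactly 90000/25 PTS ticks per
 * frame; DUR2PTS_REM() is the remainder of that division ((x*90) % 96).
 */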
#define FIX_FRAME_RATE_CHECK_IFRAME_NUM 2
#define FIX_FRAME_RATE_OFF 0
#define FIX_FRAME_RATE_ON 1
#define FIX_FRAME_RATE_SMOOTH_CHECKING 2
#define DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE 0x0001
#define DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE 0x0002
#define DEC_CONTROL_FLAG_FORCE_RATE_2397_FPS_FIX_FRAME_RATE 0x0010
#define DEC_CONTROL_FLAG_FORCE_RATE_2997_FPS_FIX_FRAME_RATE 0x0020
#define DECODE_ID(hw) (hw_to_vdec(hw)->id)
#define RATE_MEASURE_NUM 8
#define RATE_CORRECTION_THRESHOLD 5
#define RATE_24_FPS 4004 /* 23.97 */
#define RATE_25_FPS 3840 /* 25 */
#define DUR2PTS(x) ((x)*90/96)
#define PTS2DUR(x) ((x)*96/90)
#define DUR2PTS_REM(x) (x*90 - DUR2PTS(x)*96)
#define FIX_FRAME_RATE_CHECK_IDRFRAME_NUM 2
#define ALIGN_WIDTH(x) (ALIGN((x), 64))
#define ALIGN_HEIGHT(x) (ALIGN((x), 32))
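/*
 * e.g. ALIGN_WIDTH(720) = 768, ALIGN_WIDTH(1920) = 1920,
 * ALIGN_HEIGHT(1080) = 1088 (round up to 64/32 pixel multiples).
 */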
#define H264_DEV_NUM 9
#define CONSTRAIN_MAX_BUF_NUM
#define H264_MMU
#define VIDEO_SIGNAL_TYPE_AVAILABLE_MASK 0x20000000
#define INVALID_IDX -1 /* Invalid buffer index.*/
static int mmu_enable;
/* mmu does not support mbaff */
static int force_enable_mmu = 0;
unsigned int h264_debug_flag; /* 0xa0000000; */
unsigned int h264_debug_mask = 0xff;
/*
*h264_debug_cmd:
* 0x1xx, force decoder id of xx to be disconnected
*/
unsigned int h264_debug_cmd;
static int ref_b_frame_error_max_count = 50;
static unsigned int dec_control =
DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE |
DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE;
static unsigned int force_rate_streambase;
static unsigned int force_rate_framebase;
static unsigned int force_disp_bufspec_num;
static unsigned int fixed_frame_rate_mode;
static unsigned int error_recovery_mode_in;
static int start_decode_buf_level = 0x4000;
static int pre_decode_buf_level = 0x1000;
static int stream_mode_start_num = 4;
static unsigned int colocate_old_cal;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
/* keep the reorder size difference between BL and EL from getting too big */
static unsigned int reorder_dpb_size_margin_dv = 16;
#endif
static unsigned int reorder_dpb_size_margin = 6;
static unsigned int reference_buf_margin = 4;
#ifdef CONSTRAIN_MAX_BUF_NUM
static u32 run_ready_max_vf_only_num;
static u32 run_ready_display_q_num;
/*0: do not check
0xff: use mDPB.size
*/
static u32 run_ready_max_buf_num = 0xff;
#endif
static u32 run_ready_min_buf_num = 2;
#define VDEC_ASSIST_CANVAS_BLK32 0x5
static unsigned int max_alloc_buf_count;
static unsigned int decode_timeout_val = 100;
static unsigned int errordata_timeout_val = 50;
static unsigned int get_data_timeout_val = 2000;
#if 1
/* H264_DATA_REQUEST does not work, disable it;
decoding has errors when data sits at non-contiguous addresses
*/
static unsigned int frame_max_data_packet;
#else
static unsigned int frame_max_data_packet = 8;
#endif
static unsigned int radr;
static unsigned int rval;
static u32 endian = 0xff0;
/*
udebug_flag:
bit 0, enable ucode print
bit 1, enable ucode detail print
bit 3, disable ucode watchdog
bit [31:16] not 0, pos to dump lmem
bit 2, pop bits to lmem
bit [11:8], pre-pop bits for alignment (when bit 2 is 1)
*/
static u32 udebug_flag;
/*
when udebug_flag[1:0] is not 0
and udebug_pause_pos is not 0,
udebug_pause_pos is the position at which to pause
*/
static u32 udebug_pause_pos;
/*
when udebug_flag[1:0] is not 0
and udebug_pause_pos is not 0,
pause only when DEBUG_REG2 is equal to this val
*/
static u32 udebug_pause_val;
static u32 udebug_pause_decode_idx;
static unsigned int disp_vframe_valve_level;
static unsigned int max_decode_instance_num = H264_DEV_NUM;
static unsigned int decode_frame_count[H264_DEV_NUM];
static unsigned int display_frame_count[H264_DEV_NUM];
static unsigned int max_process_time[H264_DEV_NUM];
static unsigned int max_get_frame_interval[H264_DEV_NUM];
static unsigned int run_count[H264_DEV_NUM];
static unsigned int input_empty[H264_DEV_NUM];
static unsigned int not_run_ready[H264_DEV_NUM];
static unsigned int ref_frame_mark_flag[H264_DEV_NUM] =
{1, 1, 1, 1, 1, 1, 1, 1, 1};
#define VDEC_CLOCK_ADJUST_FRAME 30
static unsigned int clk_adj_frame_count;
/*
*bit[3:0]: 0, run ; 1, pause; 3, step
*bit[4]: 1, schedule run
*/
static unsigned int step[H264_DEV_NUM];
#define AUX_BUF_ALIGN(adr) ((adr + 0xf) & (~0xf))
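/* round an address up to a 16-byte boundary, e.g. AUX_BUF_ALIGN(0x1001) = 0x1010 */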
static u32 prefix_aux_buf_size = (16 * 1024);
static u32 suffix_aux_buf_size;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
static u32 dv_toggle_prov_name;
static u32 dolby_meta_with_el;
#endif
/*
bit[8]
0: use sys_info[bit 3]
not 0: use i_only_flag[7:0]
bit[7:0]:
bit 0, 1: only display I picture;
bit 1, 1: only decode I picture;
*/
static unsigned int i_only_flag;
/*
error_proc_policy:
bit[0] send_error_frame_flag;
(valid when bit[31] is 1, otherwise use sysinfo)
bit[1] do not decode if config_decode_buf() fail
bit[2] force release buf if in deadlock
bit[3] force sliding window ref_frames_in_buffer > num_ref_frames
bit[4] check inactive of receiver
bit[5] reset buffmgr if in deadlock
bit[6] reset buffmgr if bufspec, collocate buf, pic alloc fail
bit[7] reset buffmgr if dpb error
bit[8] check total mbx/mby of decoded frame
bit[9] check ERROR_STATUS_REG
bit[10] check reference list
bit[11] mark error if dpb error
bit[12] i_only when error happen
bit[13] 0: mark error according to last pic, 1: ignore mark error
bit[14] 0: result done when timeout from ucode. 1: reset bufmgr when timeout.
bit[15] 1: if the dpb_frame_count difference is large, move the picture out of the DPB buffer.
bit[16] 1: check the slice header number.
bit[17] 1: if the decoded MB count is insufficient but greater than the threshold, treat the frame as correct.
bit[18] 1: on timeout status, still store the pic into the dpb buffer.
bit[19] 1: if many consecutive B frames are wrong, reset the DPB queue.
bit[20] 1: fix error streams whose errors would otherwise spread and stall playback.
bit[21] 1: fix the jitter issue caused by DVB loop playback.
bit[22] 1: in streaming mode, support discarding data.
bit[23] 0: set error flag on frame number gap error and drop it, 1: ignore error.
*/
static unsigned int error_proc_policy = 0x3fCfb6; /*0x1f14*/
static unsigned int v4l_error_policy = 0x8017C3B5; //default
/*
error_skip_count:
bit[11:0] error skip frame count
bit[15:12] error skip i picture count
*/
static unsigned int error_skip_count = (0x2 << 12) | 0x40;
static unsigned int force_sliding_margin;
/*
bit[1:0]:
0, start playing from any frame
1, start playing from I frame
bit[15:8]: the count of skip frames after first I
2, start playing from second I frame (decode from the first I)
bit[15:8]: the max count of skip frames after first I
3, start playing from IDR
*/
static unsigned int first_i_policy = 1;
/*
fast_output_enable:
bit [0], output frame if there is IDR in list
bit [1], output frame if the current poc is 1 bigger than the previous poc
bit [2], if even poc only, output frame if the current poc
is 2 bigger than the previous poc
bit [3], ip only
*/
static unsigned int fast_output_enable = H264_OUTPUT_MODE_NORMAL;
static unsigned int enable_itu_t35 = 1;
static unsigned int frmbase_cont_bitlevel = 0x40;
static unsigned int frmbase_cont_bitlevel2 = 0x1;
static unsigned int check_slice_num = 30;
static unsigned int mb_count_threshold = 5; /*percentage*/
#define MH264_USERDATA_ENABLE
/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */
/* hw->double_write_mode:
0, no double write
1, 1:1 ratio
2, (1/4):(1/4) ratio
3, (1/4):(1/4) ratio, with the compressed frame included as well
4, (1/2):(1/2) ratio
0x10, double write only
0x10000: vdec dw horizontal 1/2
0x20000: vdec dw horizontal/vertical 1/2
*/
static u32 double_write_mode;
static u32 without_display_mode;
static int loop_playback_poc_threshold = 400;
static int poc_threshold = 50;
static u32 lookup_check_conut = 30;
/*
*[3:0] 0: default use config from omx.
* 1: force enable fence.
* 2: disable fence.
*[7:4] 0: fence use for driver.
* 1: fence fd use for app.
*/
static u32 force_config_fence;
static u32 adjust_dpb_size = 13;
#define IS_VDEC_DW(hw) (hw->double_write_mode >> 16 & 0xf)
static void vmh264_dump_state(struct vdec_s *vdec);
#define is_in_parsing_state(status) \
((status == H264_ACTION_SEARCH_HEAD) || \
((status & 0xf0) == 0x80))
#define is_interlace(frame) \
((frame->frame &&\
frame->top_field &&\
frame->bottom_field &&\
(!frame->frame->coded_frame)) || \
(frame->frame && \
frame->frame->coded_frame && \
(!frame->frame->frame_mbs_only_flag) && \
frame->frame->structure == FRAME))
static inline bool close_to(int a, int b, int m)
{
return (abs(a - b) < m) ? true : false;
}
#if 0
#define h264_alloc_hw_stru(dev, size, opt) devm_kzalloc(dev, size, opt)
#define h264_free_hw_stru(dev, hw) devm_kfree(dev, hw)
#else
#define h264_alloc_hw_stru(dev, size, opt) vzalloc(size)
#define h264_free_hw_stru(dev, hw) vfree(hw)
#endif
/* #if MESON_CPU_TYPE >= MESON_CPU_TYPE_MESON6 */
#define NV21
/* #endif */
/* 12M for L41 */
#define MAX_DPB_BUFF_SIZE (12*1024*1024)
#define DEFAULT_MEM_SIZE (32*1024*1024)
#define AVIL_DPB_BUFF_SIZE 0x01ec2000
#define DEF_BUF_START_ADDR 0x00000000
#define mem_sps_base 0x01c3c00
#define mem_pps_base 0x01cbc00
/*#define V_BUF_ADDR_OFFSET (0x13e000)*/
u32 V_BUF_ADDR_OFFSET = 0x200000;
#define DCAC_READ_MARGIN (64 * 1024)
#define EXTEND_SAR 0xff
#define BUFSPEC_POOL_SIZE 64
#define VF_POOL_SIZE 64
#define VF_POOL_NUM 2
#define MAX_VF_BUF_NUM 27
#define BMMU_MAX_BUFFERS (BUFSPEC_POOL_SIZE + 3)
#define BMMU_REF_IDX (BUFSPEC_POOL_SIZE)
#define BMMU_DPB_IDX (BUFSPEC_POOL_SIZE + 1)
#define BMMU_EXTIF_IDX (BUFSPEC_POOL_SIZE + 2)
#define EXTIF_BUF_SIZE (0x10000 * 2)
#define HEADER_BUFFER_IDX(n) (n)
#define VF_BUFFER_IDX(n) (n)
#define PUT_INTERVAL (HZ/100)
#define NO_DISP_WD_COUNT (3 * HZ / PUT_INTERVAL)
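/*
 * PUT_INTERVAL is HZ/100 jiffies (about 10 ms), so NO_DISP_WD_COUNT
 * corresponds to roughly 3 seconds worth of intervals (300 at HZ=100)
 * before the no-display watchdog trips.
 */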
#define MMU_MAX_BUFFERS BUFSPEC_POOL_SIZE
#define SWITCHING_STATE_OFF 0
#define SWITCHING_STATE_ON_CMD3 1
#define SWITCHING_STATE_ON_CMD1 2
#define INCPTR(p) ptr_atomic_wrap_inc(&p)
#define SLICE_TYPE_I 2
#define SLICE_TYPE_P 5
#define SLICE_TYPE_B 6
struct buffer_spec_s {
/*
used:
-1, none allocated
0, allocated, free
1, used by dpb
2, in disp queue;
3, in disp queue, isolated,
do not use for dpb when vf_put;
4, to release
5, in disp queue, isolated (but not to release)
do not use for dpb when vf_put;
*/
unsigned int used;
unsigned int info0;
unsigned int info1;
unsigned int info2;
unsigned int y_addr;
unsigned int u_addr;
unsigned int v_addr;
int y_canvas_index;
int u_canvas_index;
int v_canvas_index;
#ifdef VDEC_DW
unsigned int vdec_dw_y_addr;
unsigned int vdec_dw_u_addr;
unsigned int vdec_dw_v_addr;
int vdec_dw_y_canvas_index;
int vdec_dw_u_canvas_index;
int vdec_dw_v_canvas_index;
#ifdef NV21
struct canvas_config_s vdec_dw_canvas_config[2];
#else
struct canvas_config_s vdec_dw_canvas_config[3];
#endif
#endif
#ifdef NV21
struct canvas_config_s canvas_config[2];
#else
struct canvas_config_s canvas_config[3];
#endif
unsigned long cma_alloc_addr;
unsigned int buf_adr;
#ifdef H264_MMU
unsigned long alloc_header_addr;
#endif
char *aux_data_buf;
int aux_data_size;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
unsigned char dv_enhance_exist;
#endif
int canvas_pos;
int vf_ref;
/*unsigned int comp_body_size;*/
unsigned int dw_y_adr;
unsigned int dw_u_v_adr;
int fs_idx;
int ctx_buf_idx;
};
#define AUX_DATA_SIZE(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_size)
#define AUX_DATA_BUF(pic) (hw->buffer_spec[pic->buf_spec_num].aux_data_buf)
#define DEL_EXIST(h, p) (h->buffer_spec[p->buf_spec_num].dv_enhance_exist)
#define vdec_dw_spec2canvas(x) \
(((x)->vdec_dw_v_canvas_index << 16) | \
((x)->vdec_dw_u_canvas_index << 8) | \
((x)->vdec_dw_y_canvas_index << 0))
#define spec2canvas(x) \
(((x)->v_canvas_index << 16) | \
((x)->u_canvas_index << 8) | \
((x)->y_canvas_index << 0))
#define FRAME_INDEX(vf_index) (vf_index & 0xff)
#define BUFSPEC_INDEX(vf_index) ((vf_index >> 8) & 0xff)
#define VF_INDEX(frm_idx, bufspec_idx) (frm_idx | (bufspec_idx << 8))
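/*
 * vf->index packs the DPB frame index in bits[7:0] and the buffer_spec
 * index in bits[15:8]: VF_INDEX(5, 3) = 0x0305, FRAME_INDEX(0x0305) = 5,
 * BUFSPEC_INDEX(0x0305) = 3. Likewise spec2canvas() packs the y/u/v
 * canvas indices into one register word, e.g. y = 0x10, u = 0x11,
 * v = 0x12 gives 0x121110.
 */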
static struct vframe_s *vh264_vf_peek(void *);
static struct vframe_s *vh264_vf_get(void *);
static void vh264_vf_put(struct vframe_s *, void *);
static int vh264_vf_states(struct vframe_states *states, void *);
static int vh264_event_cb(int type, void *data, void *private_data);
static void vh264_work(struct work_struct *work);
static void vh264_timeout_work(struct work_struct *work);
static void vh264_notify_work(struct work_struct *work);
#ifdef MH264_USERDATA_ENABLE
static void user_data_ready_notify_work(struct work_struct *work);
static void vmh264_wakeup_userdata_poll(struct vdec_s *vdec);
#endif
static const char vh264_dec_id[] = "vh264-dev";
#define PROVIDER_NAME "vdec.h264"
static const struct vframe_operations_s vf_provider_ops = {
.peek = vh264_vf_peek,
.get = vh264_vf_get,
.put = vh264_vf_put,
.event_cb = vh264_event_cb,
.vf_states = vh264_vf_states,
};
#define DEC_RESULT_NONE 0
#define DEC_RESULT_DONE 1
#define DEC_RESULT_AGAIN 2
#define DEC_RESULT_CONFIG_PARAM 3
#define DEC_RESULT_GET_DATA 4
#define DEC_RESULT_GET_DATA_RETRY 5
#define DEC_RESULT_ERROR 6
#define DEC_RESULT_EOS 7
#define DEC_RESULT_FORCE_EXIT 8
#define DEC_RESULT_TIMEOUT 9
#define DEC_RESULT_NEED_MORE_BUFFER 10
/*
*static const char *dec_result_str[] = {
* "DEC_RESULT_NONE ",
* "DEC_RESULT_DONE ",
* "DEC_RESULT_AGAIN ",
* "DEC_RESULT_CONFIG_PARAM",
* "DEC_RESULT_GET_DATA ",
* "DEC_RESULT_GET_DA_RETRY",
* "DEC_RESULT_ERROR ",
*};
*/
#define UCODE_IP_ONLY 2
#define UCODE_IP_ONLY_PARAM 1
#define MC_OFFSET_HEADER 0x0000
#define MC_OFFSET_DATA 0x1000
#define MC_OFFSET_MMCO 0x2000
#define MC_OFFSET_LIST 0x3000
#define MC_OFFSET_SLICE 0x4000
#define MC_OFFSET_MAIN 0x5000
#define MC_TOTAL_SIZE ((20+16)*SZ_1K)
#define MC_SWAP_SIZE (4*SZ_1K)
#define MODE_ERROR 0
#define MODE_FULL 1
#define DFS_HIGH_THEASHOLD 3
#define INIT_FLAG_REG AV_SCRATCH_2
#define HEAD_PADING_REG AV_SCRATCH_3
#define UCODE_WATCHDOG_REG AV_SCRATCH_7
#define LMEM_DUMP_ADR AV_SCRATCH_L
#define DEBUG_REG1 AV_SCRATCH_M
#define DEBUG_REG2 AV_SCRATCH_N
#define FRAME_COUNTER_REG AV_SCRATCH_I
#define RPM_CMD_REG AV_SCRATCH_A
#define H264_DECODE_SIZE AV_SCRATCH_E
#define H264_DECODE_MODE AV_SCRATCH_4
#define H264_DECODE_SEQINFO AV_SCRATCH_5
#define H264_AUX_ADR AV_SCRATCH_C
#define H264_AUX_DATA_SIZE AV_SCRATCH_H
#define H264_DECODE_INFO M4_CONTROL_REG /* 0xc29 */
#define DPB_STATUS_REG AV_SCRATCH_J
#define ERROR_STATUS_REG AV_SCRATCH_9
/*
NAL_SEARCH_CTL: bit 0, enable itu_t35
NAL_SEARCH_CTL: bit 1, enable mmu
NAL_SEARCH_CTL: bit 2, detect frame_mbs_only_flag changes to decide whether to switch resolution
NAL_SEARCH_CTL: bit 15, bitstream_restriction_flag
*/
#define NAL_SEARCH_CTL AV_SCRATCH_9
#define MBY_MBX MB_MOTION_MODE /*0xc07*/
#define DECODE_MODE_SINGLE 0x0
#define DECODE_MODE_MULTI_FRAMEBASE 0x1
#define DECODE_MODE_MULTI_STREAMBASE 0x2
#define DECODE_MODE_MULTI_DVBAL 0x3
#define DECODE_MODE_MULTI_DVENL 0x4
static DEFINE_MUTEX(vmh264_mutex);
static DEFINE_MUTEX(reset_mutex);
#ifdef MH264_USERDATA_ENABLE
struct mh264_userdata_record_t {
struct userdata_meta_info_t meta_info;
u32 rec_start;
u32 rec_len;
};
struct mh264_ud_record_wait_node_t {
struct list_head list;
struct mh264_userdata_record_t ud_record;
};
#define USERDATA_FIFO_NUM 256
#define MAX_FREE_USERDATA_NODES 5
struct mh264_userdata_info_t {
struct mh264_userdata_record_t records[USERDATA_FIFO_NUM];
u8 *data_buf;
u8 *data_buf_end;
u32 buf_len;
u32 read_index;
u32 write_index;
u32 last_wp;
};
#endif
struct mh264_fence_vf_t {
u32 used_size;
struct vframe_s *fence_vf[VF_POOL_SIZE];
};
struct vdec_h264_hw_s {
spinlock_t lock;
spinlock_t bufspec_lock;
int id;
struct platform_device *platform_dev;
unsigned long cma_alloc_addr;
/* struct page *collocate_cma_alloc_pages; */
unsigned long collocate_cma_alloc_addr;
u32 prefix_aux_size;
u32 suffix_aux_size;
void *aux_addr;
dma_addr_t aux_phy_addr;
/* buffer for storing all sei data */
void *sei_data_buf;
u32 sei_data_len;
/* buffer for storing one itu35 record */
void *sei_itu_data_buf;
u32 sei_itu_data_len;
/* recycled buffer for user data, storing all itu35 records */
void *sei_user_data_buffer;
u32 sei_user_data_wp;
#ifdef MH264_USERDATA_ENABLE
struct work_struct user_data_ready_work;
#endif
struct StorablePicture *last_dec_picture;
ulong lmem_phy_addr;
dma_addr_t lmem_addr;
void *bmmu_box;
#ifdef H264_MMU
void *mmu_box;
void *frame_mmu_map_addr;
dma_addr_t frame_mmu_map_phy_addr;
u32 hevc_cur_buf_idx;
u32 losless_comp_body_size;
u32 losless_comp_body_size_sao;
u32 losless_comp_header_size;
u32 mc_buffer_size_u_v;
u32 mc_buffer_size_u_v_h;
u32 is_idr_frame;
u32 is_new_pic;
u32 frame_done;
u32 frame_busy;
unsigned long extif_addr;
int double_write_mode;
int mmu_enable;
#endif
DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE);
DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE);
int cur_pool;
struct vframe_s vfpool[VF_POOL_NUM][VF_POOL_SIZE];
struct buffer_spec_s buffer_spec[BUFSPEC_POOL_SIZE];
struct vframe_s switching_fense_vf;
struct h264_dpb_stru dpb;
u8 init_flag;
u8 first_sc_checked;
u8 has_i_frame;
u8 config_bufmgr_done;
u32 max_reference_size;
u32 decode_pic_count;
u32 reflist_error_count;
int start_search_pos;
u32 reg_iqidct_control;
bool reg_iqidct_control_init_flag;
u32 reg_vcop_ctrl_reg;
u32 reg_rv_ai_mb_count;
u32 vld_dec_control;
struct vframe_s vframe_dummy;
unsigned char buffer_empty_flag;
u32 frame_width;
u32 frame_height;
u32 frame_dur;
u32 frame_prog;
u32 frame_packing_type;
struct vframe_chunk_s *chunk;
u32 stat;
unsigned long buf_start;
u32 buf_offset;
u32 buf_size;
/* u32 ucode_map_start; */
u32 pts_outside;
u32 sync_outside;
u32 vh264_ratio;
u32 vh264_rotation;
u32 use_idr_framerate;
u32 seq_info;
u32 seq_info2;
u32 video_signal_from_vui; /*to do .. */
u32 timing_info_present_flag;
u32 fixed_frame_rate_flag;
u32 bitstream_restriction_flag;
u32 num_reorder_frames;
u32 max_dec_frame_buffering;
u32 iframe_count;
u32 aspect_ratio_info;
u32 num_units_in_tick;
u32 time_scale;
u32 h264_ar;
bool h264_first_valid_pts_ready;
u32 h264pts1;
u32 h264pts2;
u32 pts_duration;
u32 h264_pts_count;
u32 duration_from_pts_done;
u32 pts_unstable;
u32 unstable_pts;
u32 last_checkout_pts;
u32 max_refer_buf;
s32 vh264_stream_switching_state;
struct vframe_s *p_last_vf;
u32 last_pts;
u32 last_pts_remainder;
u32 last_duration;
u32 last_mb_width, last_mb_height;
bool check_pts_discontinue;
bool pts_discontinue;
u32 wait_buffer_counter;
u32 first_offset;
u32 first_pts;
u64 first_pts64;
bool first_pts_cached;
u64 last_pts64;
#if 0
void *sei_data_buffer;
dma_addr_t sei_data_buffer_phys;
#endif
uint error_recovery_mode;
uint mb_total;
uint mb_width;
uint mb_height;
uint i_only;
int skip_frame_count;
bool no_poc_reorder_flag;
bool send_error_frame_flag;
dma_addr_t mc_dma_handle;
void *mc_cpu_addr;
int vh264_reset;
atomic_t vh264_active;
struct dec_sysinfo vh264_amstream_dec_info;
int dec_result;
u32 timeout_processing;
struct work_struct work;
struct work_struct notify_work;
struct work_struct timeout_work;
void (*vdec_cb)(struct vdec_s *, void *);
void *vdec_cb_arg;
struct timer_list check_timer;
/**/
unsigned int last_frame_time;
atomic_t vf_pre_count;
atomic_t vf_get_count;
atomic_t vf_put_count;
/* timeout handle */
unsigned long int start_process_time;
unsigned int last_mby_mbx;
unsigned int last_vld_level;
unsigned int decode_timeout_count;
unsigned int timeout_num;
unsigned int search_dataempty_num;
unsigned int decode_timeout_num;
unsigned int decode_dataempty_num;
unsigned int buffer_empty_recover_num;
unsigned get_data_count;
unsigned get_data_start_time;
/**/
/*log*/
unsigned int packet_write_success_count;
unsigned int packet_write_EAGAIN_count;
unsigned int packet_write_ENOMEM_count;
unsigned int packet_write_EFAULT_count;
unsigned int total_read_size_pre;
unsigned int total_read_size;
unsigned int frame_count_pre;
#ifdef CONFIG_AMLOGIC_MEDIA_ENHANCEMENT_DOLBYVISION
u8 switch_dvlayer_flag;
u8 got_valid_nal;
#endif
u8 eos;
u8 data_flag;
u32 no_error_count;
u32 no_error_i_count;
/*
NODISP_FLAG
*/
u8 dec_flag;
u32 ucode_pause_pos;
u8 reset_bufmgr_flag;
u32 reset_bufmgr_count;
ulong timeout;
u32 timeout_flag;
u32 cfg_param1;
u32 cfg_param2;
u32 cfg_param3;
u32 cfg_param4;
u32 cfg_bitstream_restriction_flag;
int valve_count;
u8 next_again_flag;
u32 pre_parser_wr_ptr;
struct firmware_s *fw;
struct firmware_s *fw_mmu;
#ifdef MH264_USERDATA_ENABLE
/*user data*/
struct mutex userdata_mutex;
struct mh264_userdata_info_t userdata_info;
struct mh264_userdata_record_t ud_record;
int wait_for_udr_send;
#endif
u32 no_mem_count;
u32 canvas_mode;
bool is_used_v4l;
void *v4l2_ctx;
bool v4l_params_parsed;
wait_queue_head_t wait_q;
u32 reg_g_status;
struct mutex chunks_mutex;
int need_cache_size;
u64 sc_start_time;
u8 frmbase_cont_flag;
struct vframe_qos_s vframe_qos;
int frameinfo_enable;
bool first_head_check_flag;
unsigned int height_aspect_ratio;
unsigned int width_aspect_ratio;
unsigned int first_i_policy;
u32 reorder_dpb_size_margin;
bool wait_reset_done_flag;
#ifdef DETECT_WRONG_MULTI_SLICE
unsigned int multi_slice_pic_check_count;
/* multi_slice_pic_flag:
0, unknown;
1, single slice;
2, multi slice
*/
unsigned int multi_slice_pic_flag;
unsigned int picture_slice_count;
unsigned int cur_picture_slice_count;
unsigned char force_slice_as_picture_flag;
unsigned int last_picture_slice_count;
unsigned int first_pre_frame_num;
#endif
u32 res_ch_flag;
u32 b_frame_error_count;
struct vdec_info gvs;
u32 kpi_first_i_comming;
u32 kpi_first_i_decoded;
int sidebind_type;
int sidebind_channel_id;
u32 low_latency_mode;
int ip_field_error_count;
int buffer_wrap[BUFSPEC_POOL_SIZE];
int loop_flag;
int loop_last_poc;
bool enable_fence;
int fence_usage;
bool discard_dv_data;
u32 metadata_config_flag;
int vdec_pg_enable_flag;
u32 save_reg_f;
u32 start_bit_cnt;
u32 right_frame_count;
u32 wrong_frame_count;
u32 error_frame_width;
u32 error_frame_height;
ulong fb_token;
struct mh264_fence_vf_t fence_vf_s;
struct mutex fence_mutex;
u32 no_decoder_buffer_flag;
u32 video_signal_type;
bool need_free_aux_data;
u32 error_proc_policy;
struct trace_decoder_name trace;
int csd_change_flag;
};
static u32 again_threshold;
static void timeout_process(struct vdec_h264_hw_s *hw);
static void dump_bufspec(struct vdec_h264_hw_s *hw,
const char *caller);
static void h264_reconfig(struct vdec_h264_hw_s *hw);
static void h264_reset_bufmgr_v4l(struct vdec_s *vdec, int flush_flag);
static void vh264_local_init(struct vdec_h264_hw_s *hw, bool is_reset);
static int vh264_hw_ctx_restore(struct vdec_h264_hw_s *hw);
static int vh264_stop(struct vdec_h264_hw_s *hw);
static s32 vh264_init(struct vdec_h264_hw_s *hw);
static void set_frame_info(struct vdec_h264_hw_s *hw, struct vframe_s *vf,
u32 index);
static void release_aux_data(struct vdec_h264_hw_s *hw,
int buf_spec_num);
#ifdef ERROR_HANDLE_TEST
static void h264_clear_dpb(struct vdec_h264_hw_s *hw);
#endif
#define H265_PUT_SAO_4K_SET 0x03
#define H265_ABORT_SAO_4K_SET 0x04
#define H265_ABORT_SAO_4K_SET_DONE 0x05
#define SYS_COMMAND HEVC_ASSIST_SCRATCH_0
#define H265_CHECK_AXI_INFO_BASE HEVC_ASSIST_SCRATCH_8
#define H265_SAO_4K_SET_BASE HEVC_ASSIST_SCRATCH_9
#define H265_SAO_4K_SET_COUNT HEVC_ASSIST_SCRATCH_A
#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464
#define HEVC_CM_HEADER_START_ADDR 0x3628
#define HEVC_CM_BODY_START_ADDR 0x3626
#define HEVC_CM_BODY_LENGTH 0x3627
#define HEVC_CM_HEADER_LENGTH 0x3629
#define HEVC_CM_HEADER_OFFSET 0x362b
#define HEVC_SAO_CTRL9 0x362d
#define HEVCD_MPP_DECOMP_CTL3 0x34c4
#define HEVCD_MPP_VDEC_MCR_CTL 0x34c8
#define HEVC_DBLK_CFGB 0x350b
#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009
#define H265_DW_NO_SCALE
#define H265_MEM_MAP_MODE 0 /*0:linear 1:32x32 2:64x32*/
#define H265_LOSLESS_COMPRESS_MODE
#define MAX_FRAME_4K_NUM 0x1200
#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)
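/*
 * MAX_FRAME_4K_NUM = 0x1200 = 4608 4K pages per frame, with 4 bytes per
 * map entry. By compute_losless_comp_body_size() below, a 4096x2304
 * 8-bit frame needs about 3672 pages, so this leaves some margin.
 */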
/* 0:linear 1:32x32 2:64x32 ; m8baby test1902 */
static u32 mem_map_mode = H265_MEM_MAP_MODE;
#define MAX_SIZE_4K (4096 * 2304)
#define MAX_SIZE_2K (1920 * 1088)
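/*
 * Reject negative dimensions and frames larger than the chip limit.
 * Comparing w against max / h (instead of computing w * h) avoids
 * overflow for bogus stream dimensions. e.g. 4096x2160 passes against
 * MAX_SIZE_4K, while anything beyond 1920x1088 fails on T5D.
 */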
static int is_oversize(int w, int h)
{
int max = MAX_SIZE_4K;
if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)
max = MAX_SIZE_2K;
if (w < 0 || h < 0)
return true;
if (h != 0 && (w > max / h))
return true;
return false;
}
static void vmh264_udc_fill_vpts(struct vdec_h264_hw_s *hw,
int frame_type,
u32 vpts,
u32 vpts_valid);
static int compute_losless_comp_body_size(int width,
int height, int bit_depth_10);
static int compute_losless_comp_header_size(int width, int height);
static int hevc_alloc_mmu(struct vdec_h264_hw_s *hw, int pic_idx,
int pic_width, int pic_height, u16 bit_depth,
unsigned int *mmu_index_adr) {
int cur_buf_idx;
int bit_depth_10 = (bit_depth != 0x00);
int picture_size;
u32 cur_mmu_4k_number;
WRITE_VREG(CURR_CANVAS_CTRL, pic_idx<<24);
cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff;
picture_size = compute_losless_comp_body_size(pic_width,
pic_height, bit_depth_10);
cur_mmu_4k_number = ((picture_size+(1<<12)-1) >> 12);
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL,
"alloc_mmu new_fb_idx %d picture_size %d cur_mmu_4k_number %d\n",
cur_buf_idx, picture_size, cur_mmu_4k_number);
if (cur_mmu_4k_number > MAX_FRAME_4K_NUM) {
pr_err("hevc_alloc_mmu cur_mmu_4k_number %d unsupport\n",
cur_mmu_4k_number);
return -1;
}
return decoder_mmu_box_alloc_idx(
hw->mmu_box,
cur_buf_idx,
cur_mmu_4k_number,
mmu_index_adr);
}
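/*
 * Compressed (lossless) body size: the frame is tiled into 64x32 blocks,
 * each costing 3264 bytes in 8-bit MMU mode (4096 bytes for 10-bit).
 * e.g. 1920x1080, 8-bit: width_x64 = 30, height_x32 = 34,
 * bsize = 3264 * 30 * 34 = 3329280 bytes (about 813 4K pages).
 */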
static int compute_losless_comp_body_size(int width,
int height, int bit_depth_10)
{
int width_x64;
int height_x32;
int bsize;
width_x64 = width + 63;
width_x64 >>= 6;
height_x32 = height + 31;
height_x32 >>= 5;
#ifdef H264_MMU
bsize = (bit_depth_10 ? 4096 : 3264) * width_x64*height_x32;
#else
bsize = (bit_depth_10 ? 4096 : 3072) * width_x64*height_x32;
#endif
return bsize;
}
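/*
 * Compressed header size: 128 bytes per 64x64 block in MMU mode
 * (32 bytes per 128x64 block otherwise). e.g. 1920x1080 with H264_MMU:
 * width_x64 = 30, height_x64 = 17, hsize = 128 * 30 * 17 = 65280 bytes.
 */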
static int compute_losless_comp_header_size(int width, int height)
{
int width_x64;
int width_x128;
int height_x64;
int hsize;
width_x64 = width + 63;
width_x64 >>= 6;
width_x128 = width + 127;
width_x128 >>= 7;
height_x64 = height + 63;
height_x64 >>= 6;
#ifdef H264_MMU
hsize = 128*width_x64*height_x64;
#else
hsize = 32*width_x128*height_x64;
#endif
return hsize;
}
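/*
 * Size of the double-write (uncompressed) buffer in bytes. Rough worked
 * example, assuming get_double_write_ratio() returns 1 for dw_mode 1 and
 * a 1920x1080 frame: 120 x 68 = 8160 LCUs of 16x16,
 * dw_buffer_size_u_v = 8160*16*16/2 = 1044480, rounded up to 16 units of
 * 64KB, so dw_buf_size = (16 << 16) * 3 = 3 MB (luma plus chroma).
 */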
static int get_dw_size(struct vdec_h264_hw_s *hw, u32 *pdw_buffer_size_u_v_h)
{
int pic_width, pic_height;
int lcu_size = 16;
int dw_buf_size;
u32 dw_buffer_size_u_v;
u32 dw_buffer_size_u_v_h;
int dw_mode = hw->double_write_mode;
pic_width = hw->frame_width;
pic_height = hw->frame_height;
if (dw_mode) {
int pic_width_dw = pic_width /
get_double_write_ratio(hw->double_write_mode);
int pic_height_dw = pic_height /
get_double_write_ratio(hw->double_write_mode);
int pic_width_lcu_dw = (pic_width_dw % lcu_size) ?
pic_width_dw / lcu_size + 1 :
pic_width_dw / lcu_size;
int pic_height_lcu_dw = (pic_height_dw % lcu_size) ?
pic_height_dw / lcu_size + 1 :
pic_height_dw / lcu_size;
int lcu_total_dw = pic_width_lcu_dw * pic_height_lcu_dw;
dw_buffer_size_u_v = lcu_total_dw * lcu_size * lcu_size / 2;
dw_buffer_size_u_v_h = (dw_buffer_size_u_v + 0xffff) >> 16;
/*64k alignment*/
dw_buf_size = ((dw_buffer_size_u_v_h << 16) * 3);
*pdw_buffer_size_u_v_h = dw_buffer_size_u_v_h;
} else {
*pdw_buffer_size_u_v_h = 0;
dw_buf_size = 0;
}
return dw_buf_size;
}
static void hevc_mcr_config_canv2axitbl(struct vdec_h264_hw_s *hw, int restore)
{
int i, size;
u32 canvas_addr;
unsigned long maddr;
int num_buff = hw->dpb.mDPB.size;
int dw_size = 0;
u32 dw_buffer_size_u_v_h;
u32 blkmode = hw->canvas_mode;
int dw_mode = hw->double_write_mode;
canvas_addr = ANC0_CANVAS_ADDR;
for (i = 0; i < num_buff; i++)
WRITE_VREG((canvas_addr + i), i | (i << 8) | (i << 16));
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x1 << 1) | (0x1 << 2));
size = hw->losless_comp_body_size + hw->losless_comp_header_size;
dw_size = get_dw_size(hw, &dw_buffer_size_u_v_h);
size += dw_size;
if (size > 0)
size += 0x10000;
dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
"dw_buffer_size_u_v_h = %d, dw_size = 0x%x, size = 0x%x\n",
dw_buffer_size_u_v_h, dw_size, size);
dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
"body_size = %d, header_size = %d, body_size_sao = %d\n",
hw->losless_comp_body_size,
hw->losless_comp_header_size,
hw->losless_comp_body_size_sao);
for (i = 0; i < num_buff; i++) {
if (!restore) {
if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box,
HEADER_BUFFER_IDX(i), size,
DRIVER_HEADER_NAME, &maddr) < 0) {
dpb_print(DECODE_ID(hw), 0,
"%s malloc compress header failed %d\n",
DRIVER_HEADER_NAME, i);
return;
}
} else
maddr = hw->buffer_spec[i].alloc_header_addr;
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA, maddr >> 5);
hw->buffer_spec[i].alloc_header_addr = maddr;
dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
"%s : canvas: %d axiaddr:%x size 0x%x\n",
__func__, i, (u32)maddr, size);
if (dw_mode) {
u32 addr;
int canvas_w;
int canvas_h;
canvas_w = hw->frame_width /
get_double_write_ratio(hw->double_write_mode);
canvas_h = hw->frame_height /
get_double_write_ratio(hw->double_write_mode);
if (hw->canvas_mode == 0)
canvas_w = ALIGN(canvas_w, 32);
else
canvas_w = ALIGN(canvas_w, 64);
canvas_h = ALIGN(canvas_h, 32);
hw->buffer_spec[i].dw_y_adr =
maddr + hw->losless_comp_header_size;
hw->buffer_spec[i].dw_y_adr =
((hw->buffer_spec[i].dw_y_adr + 0xffff) >> 16)
<< 16;
hw->buffer_spec[i].dw_u_v_adr =
hw->buffer_spec[i].dw_y_adr
+ (dw_buffer_size_u_v_h << 16) * 2;
hw->buffer_spec[i].buf_adr
= hw->buffer_spec[i].dw_y_adr;
addr = hw->buffer_spec[i].buf_adr;
dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
"dw_y_adr = 0x%x, dw_u_v_adr = 0x%x, y_addr = 0x%x, u_addr = 0x%x, v_addr = 0x%x, width = %d, height = %d\n",
hw->buffer_spec[i].dw_y_adr,
hw->buffer_spec[i].dw_u_v_adr,
hw->buffer_spec[i].y_addr,
hw->buffer_spec[i].u_addr,
hw->buffer_spec[i].v_addr,
canvas_w,
canvas_h);
hw->buffer_spec[i].canvas_config[0].phy_addr =
hw->buffer_spec[i].dw_y_adr;
hw->buffer_spec[i].canvas_config[0].width = canvas_w;
hw->buffer_spec[i].canvas_config[0].height = canvas_h;
hw->buffer_spec[i].canvas_config[0].block_mode =
blkmode;
hw->buffer_spec[i].canvas_config[0].endian = 7;
hw->buffer_spec[i].canvas_config[1].phy_addr =
hw->buffer_spec[i].dw_u_v_adr;
hw->buffer_spec[i].canvas_config[1].width = canvas_w;
hw->buffer_spec[i].canvas_config[1].height = canvas_h;
hw->buffer_spec[i].canvas_config[1].block_mode =
blkmode;
hw->buffer_spec[i].canvas_config[1].endian = 7;
}
}
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR, (0 << 8) | (0<<1) | 1);
for (i = 0; i < 32; i++)
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, 0);
return;
}
static void hevc_mcr_config_mc_ref(struct vdec_h264_hw_s *hw)
{
u32 i;
u32 ref_canv;
struct Slice *pSlice = &(hw->dpb.mSlice);
/*REFLIST[0]*/
for (i = 0; i < (unsigned int)(pSlice->listXsize[0]); i++) {
struct StorablePicture *ref = pSlice->listX[0][i];
if (ref == NULL)
return;
WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24);
ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
(ref->buf_spec_num & 0x3f) << 8);
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv);
}
/*REFLIST[1]*/
for (i = 0; i < (unsigned int)(pSlice->listXsize[1]); i++) {
struct StorablePicture *ref = pSlice->listX[1][i];
if (ref == NULL)
return;
WRITE_VREG(CURR_CANVAS_CTRL, ref->buf_spec_num<<24);
ref_canv = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
(ref->buf_spec_num & 0x3f) << 8);
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR, ref_canv);
}
return;
}
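/*
 * Program the HEVCD MC reference cache (MCRCC) with canvas hints from the
 * current slice's reference lists: the cache is disabled for I slices,
 * P slices use listX[0][0]/listX[0][1], and B slices use listX[0][0] plus
 * listX[1][0] (or listX[1][1] when both map to the same canvas).
 */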
static void hevc_mcr_config_mcrcc(struct vdec_h264_hw_s *hw)
{
u32 rdata32;
u32 rdata32_2;
u32 slice_type;
struct StorablePicture *ref;
struct Slice *pSlice;
slice_type = hw->dpb.mSlice.slice_type;
pSlice = &(hw->dpb.mSlice);
WRITE_VREG(HEVCD_MCRCC_CTL1, 0x2);
if (slice_type == I_SLICE) {
WRITE_VREG(HEVCD_MCRCC_CTL1, 0x0);
return;
}
if (slice_type == B_SLICE) {
ref = pSlice->listX[0][0];
if (ref == NULL)
return;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
((ref->buf_spec_num & 0x3f) << 8));
rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
rdata32 = rdata32 & 0xffff;
rdata32 = rdata32 | (rdata32 << 16);
WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);
ref = pSlice->listX[1][0];
if (ref == NULL)
return;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
((ref->buf_spec_num & 0x3f) << 8));
rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
rdata32_2 = rdata32_2 & 0xffff;
rdata32_2 = rdata32_2 | (rdata32_2 << 16);
if (rdata32 == rdata32_2) {
ref = pSlice->listX[1][1];
if (ref == NULL)
return;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
((ref->buf_spec_num & 0x3f) << 8));
rdata32_2 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
rdata32_2 = rdata32_2 & 0xffff;
rdata32_2 = rdata32_2 | (rdata32_2 << 16);
}
WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32_2);
} else { /*P-PIC*/
ref = pSlice->listX[0][0];
if (ref == NULL)
return;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
((ref->buf_spec_num & 0x3f) << 8));
rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
rdata32 = rdata32 & 0xffff;
rdata32 = rdata32 | (rdata32 << 16);
WRITE_VREG(HEVCD_MCRCC_CTL2, rdata32);
ref = pSlice->listX[0][1];
if (ref == NULL)
return;
WRITE_VREG(HEVCD_MPP_ANC_CANVAS_ACCCONFIG_ADDR,
((ref->buf_spec_num & 0x3f) << 8));
rdata32 = READ_VREG(HEVCD_MPP_ANC_CANVAS_DATA_ADDR);
rdata32 = rdata32 & 0xffff;
rdata32 = rdata32 | (rdata32 << 16);
WRITE_VREG(HEVCD_MCRCC_CTL3, rdata32);
}
WRITE_VREG(HEVCD_MCRCC_CTL1, 0xff0);
return;
}
static void hevc_mcr_sao_global_hw_init(struct vdec_h264_hw_s *hw,
u32 width, u32 height) {
u32 data32;
u32 lcu_x_num, lcu_y_num;
u32 lcu_total;
u32 mc_buffer_size_u_v;
u32 mc_buffer_size_u_v_h;
int dw_mode = hw->double_write_mode;
/*lcu_x_num = (width + 15) >> 4;*/
// width needs to be rounded up to 64 pixels -- case0260 1/10/2020
lcu_x_num = (((width + 63) >> 6) << 2);
lcu_y_num = (height + 15) >> 4;
lcu_total = lcu_x_num * lcu_y_num;
hw->mc_buffer_size_u_v = mc_buffer_size_u_v = lcu_total*16*16/2;
hw->mc_buffer_size_u_v_h =
mc_buffer_size_u_v_h = (mc_buffer_size_u_v + 0xffff)>>16;
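/*
 * e.g. 1920x1080: lcu_x_num = 120 (width rounded up to 64), lcu_y_num = 68,
 * lcu_total = 8160, mc_buffer_size_u_v = 8160*16*16/2 = 1044480 bytes,
 * mc_buffer_size_u_v_h = 16 (in 64KB units).
 */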
hw->losless_comp_body_size = 0;
hw->losless_comp_body_size_sao =
compute_losless_comp_body_size(width, height, 0);
hw->losless_comp_header_size =
compute_losless_comp_header_size(width, height);
WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1); /*sw reset ipp10b_top*/
WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x0); /*sw reset ipp10b_top*/
/* setup lcu_size = 16*/
WRITE_VREG(HEVCD_IPP_TOP_LCUCONFIG, 16); /*set lcu size = 16*/
/*pic_width/pic_height*/
WRITE_VREG(HEVCD_IPP_TOP_FRMCONFIG,
(height & 0xffff) << 16 | (width & 0xffff));
/* bitdepth_luma = 8*/
/* bitdepth_chroma = 8*/
WRITE_VREG(HEVCD_IPP_BITDEPTH_CONFIG, 0x0);/*set bit-depth 8 */
#ifdef H265_LOSLESS_COMPRESS_MODE
WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, (0x1 << 4));
WRITE_VREG(HEVCD_MPP_DECOMP_CTL2, 0x0);
#else
WRITE_VREG(HEVCD_MPP_DECOMP_CTL1, 0x1 << 31);
#endif
data32 = READ_VREG(HEVCD_IPP_AXIIF_CONFIG);
data32 &= (~0x30);
data32 |= (hw->canvas_mode << 4);
WRITE_VREG(HEVCD_IPP_AXIIF_CONFIG, data32);
WRITE_VREG(HEVCD_MPP_DECOMP_CTL3,
(0x80 << 20) | (0x80 << 10) | (0xff));
WRITE_VREG(HEVCD_MPP_VDEC_MCR_CTL, 0x1 | (0x1 << 4));
/* config vdec:h264:mdec to use hevc mcr/mcrcc/decomp */
WRITE_VREG(MDEC_PIC_DC_MUX_CTRL,
READ_VREG(MDEC_PIC_DC_MUX_CTRL) | 0x1 << 31);
/* ipp_enable*/
WRITE_VREG(HEVCD_IPP_TOP_CNTL, 0x1 << 1);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(HEVC_DBLK_CFG1, 0x2); // set ctusize==16
WRITE_VREG(HEVC_DBLK_CFG2, ((height & 0xffff)<<16) | (width & 0xffff));
if (dw_mode & 0x10)
WRITE_VREG(HEVC_DBLK_CFGB, 0x40405603);
else if (dw_mode)
WRITE_VREG(HEVC_DBLK_CFGB, 0x40405703);
else
WRITE_VREG(HEVC_DBLK_CFGB, 0x40405503);
}
data32 = READ_VREG(HEVC_SAO_CTRL0);
data32 &= (~0xf);
data32 |= 0x4;
WRITE_VREG(HEVC_SAO_CTRL0, data32);
WRITE_VREG(HEVC_SAO_PIC_SIZE, (height & 0xffff) << 16 |
(width & 0xffff));
data32 = ((lcu_x_num-1) | (lcu_y_num-1) << 16);
WRITE_VREG(HEVC_SAO_PIC_SIZE_LCU, data32);
data32 = (lcu_x_num | lcu_y_num << 16);
WRITE_VREG(HEVC_SAO_TILE_SIZE_LCU, data32);
data32 = (mc_buffer_size_u_v_h << 16) << 1;
WRITE_VREG(HEVC_SAO_Y_LENGTH, data32);
data32 = (mc_buffer_size_u_v_h << 16);
WRITE_VREG(HEVC_SAO_C_LENGTH, data32);
data32 = READ_VREG(HEVC_SAO_CTRL1);
data32 &= (~0x3000);
data32 &= (~0xff0);
data32 |= endian; /* Big-Endian per 64-bit */
if (hw->mmu_enable && (dw_mode & 0x10))
data32 |= ((hw->canvas_mode << 12) |1);
else if (hw->mmu_enable && dw_mode)
data32 |= ((hw->canvas_mode << 12));
else
data32 |= ((hw->canvas_mode << 12)|2);
WRITE_VREG(HEVC_SAO_CTRL1, data32);
#ifdef H265_DW_NO_SCALE
WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) & ~(0xff << 16));
if (hw->mmu_enable && dw_mode) {
data32 = READ_VREG(HEVC_SAO_CTRL5);
data32 &= (~(0xff << 16));
if (dw_mode == 2 ||
dw_mode == 3)
data32 |= (0xff<<16);
else if (dw_mode == 4)
data32 |= (0x33<<16);
WRITE_VREG(HEVC_SAO_CTRL5, data32);
}
#endif
#ifdef H265_LOSLESS_COMPRESS_MODE
data32 = READ_VREG(HEVC_SAO_CTRL5);
data32 |= (1<<9); /*8-bit smem-mode*/
WRITE_VREG(HEVC_SAO_CTRL5, data32);
WRITE_VREG(HEVC_CM_BODY_LENGTH, hw->losless_comp_body_size_sao);
WRITE_VREG(HEVC_CM_HEADER_OFFSET, hw->losless_comp_body_size);
WRITE_VREG(HEVC_CM_HEADER_LENGTH, hw->losless_comp_header_size);
#endif
#ifdef H265_LOSLESS_COMPRESS_MODE
WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | (0x1 << 1));
WRITE_VREG(HEVC_SAO_CTRL5, READ_VREG(HEVC_SAO_CTRL5) | (0x1 << 10));
#endif
WRITE_VREG(HEVC_SAO_CTRL9, READ_VREG(HEVC_SAO_CTRL9) | 0x1 << 7);
memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE);
WRITE_VREG(MDEC_EXTIF_CFG0, hw->extif_addr);
WRITE_VREG(MDEC_EXTIF_CFG1, 0x80000000);
return;
}
static void hevc_sao_set_slice_type(struct vdec_h264_hw_s *hw,
u32 is_new_pic, u32 is_idr)
{
hw->is_new_pic = is_new_pic;
hw->is_idr_frame = is_idr;
return;
}
static void hevc_sao_set_pic_buffer(struct vdec_h264_hw_s *hw,
struct StorablePicture *pic) {
u32 mc_y_adr;
u32 mc_u_v_adr;
u32 dw_y_adr;
u32 dw_u_v_adr;
u32 canvas_addr;
int ret;
int dw_mode = hw->double_write_mode;
if (hw->is_new_pic != 1)
return;
if (hw->is_idr_frame) {
/* William TBD */
memset(hw->frame_mmu_map_addr, 0, FRAME_MMU_MAP_SIZE);
}
WRITE_VREG(CURR_CANVAS_CTRL, pic->buf_spec_num << 24);
canvas_addr = READ_VREG(CURR_CANVAS_CTRL)&0xffffff;
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) |
(0x0 << 2) | ((canvas_addr & 0xff) << 8));
mc_y_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5;
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, (0x0 << 1) |
(0x0 << 2) | (((canvas_addr >> 8) & 0xff) << 8));
mc_u_v_adr = READ_VREG(HEVCD_MPP_ANC2AXI_TBL_DATA) << 5;
WRITE_VREG(HEVCD_MPP_ANC2AXI_TBL_CONF_ADDR, 0x1);
if (dw_mode) {
dw_y_adr = hw->buffer_spec[pic->buf_spec_num].dw_y_adr;
dw_u_v_adr = hw->buffer_spec[pic->buf_spec_num].dw_u_v_adr;
} else {
dw_y_adr = 0;
dw_u_v_adr = 0;
}
#ifdef H265_LOSLESS_COMPRESS_MODE
if (dw_mode)
WRITE_VREG(HEVC_SAO_Y_START_ADDR, dw_y_adr);
WRITE_VREG(HEVC_CM_BODY_START_ADDR, mc_y_adr);
#ifdef H264_MMU
WRITE_VREG(HEVC_CM_HEADER_START_ADDR, mc_y_adr);
#else
WRITE_VREG(HEVC_CM_HEADER_START_ADDR,
(mc_y_adr + hw->losless_comp_body_size));
#endif
#else
WRITE_VREG(HEVC_SAO_Y_START_ADDR, mc_y_adr);
#endif
#ifndef H265_LOSLESS_COMPRESS_MODE
WRITE_VREG(HEVC_SAO_C_START_ADDR, mc_u_v_adr);
#else
if (dw_mode)
WRITE_VREG(HEVC_SAO_C_START_ADDR, dw_u_v_adr);
#endif
#ifndef LOSLESS_COMPRESS_MODE
if (dw_mode) {
WRITE_VREG(HEVC_SAO_Y_WPTR, mc_y_adr);
WRITE_VREG(HEVC_SAO_C_WPTR, mc_u_v_adr);
}
#else
WRITE_VREG(HEVC_SAO_Y_WPTR, dw_y_adr);
WRITE_VREG(HEVC_SAO_C_WPTR, dw_u_v_adr);
#endif
ret = hevc_alloc_mmu(hw, pic->buf_spec_num,
(hw->mb_width << 4), (hw->mb_height << 4), 0x0,
hw->frame_mmu_map_addr);
if (ret != 0) {
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, "can't alloc need mmu1,idx %d ret =%d\n",
pic->buf_spec_num,
ret);
return;
}
/*Reset SAO + Enable SAO slice_start*/
if (hw->mmu_enable && get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A)
WRITE_VREG(HEVC_DBLK_CFG0, 0x1); // reset buffer32x4 in lpf for every picture
WRITE_VREG(HEVC_SAO_INT_STATUS,
READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 28);
WRITE_VREG(HEVC_SAO_INT_STATUS,
READ_VREG(HEVC_SAO_INT_STATUS) | 0x1 << 31);
/*pr_info("hevc_sao_set_pic_buffer:mc_y_adr: %x\n", mc_y_adr);*/
/* Send command to hevc-code to supply 4k buffers to sao */
if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(H265_SAO_4K_SET_BASE, (u32)hw->frame_mmu_map_phy_addr);
WRITE_VREG(H265_SAO_4K_SET_COUNT, MAX_FRAME_4K_NUM);
} else
WRITE_VREG(HEVC_ASSIST_MMU_MAP_ADDR, (u32)hw->frame_mmu_map_phy_addr);
WRITE_VREG(SYS_COMMAND, H265_PUT_SAO_4K_SET);
hw->frame_busy = 1;
return;
}
static void hevc_set_unused_4k_buff_idx(struct vdec_h264_hw_s *hw,
u32 buf_spec_num) {
WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24);
hw->hevc_cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL)&0xff;
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, " %s cur_buf_idx %d buf_spec_num %d\n",
__func__, hw->hevc_cur_buf_idx, buf_spec_num);
return;
}
static void hevc_set_frame_done(struct vdec_h264_hw_s *hw)
{
ulong timeout = jiffies + HZ;
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, "hevc_frame_done...set\n");
while ((READ_VREG(HEVC_SAO_INT_STATUS) & 0x1) == 0) {
if (time_after(jiffies, timeout)) {
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, " %s..timeout!\n", __func__);
break;
}
}
timeout = jiffies + HZ;
while (READ_VREG(HEVC_CM_CORE_STATUS) & 0x1) {
if (time_after(jiffies, timeout)) {
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, " %s cm_core..timeout!\n", __func__);
break;
}
}
WRITE_VREG(HEVC_SAO_INT_STATUS, 0x1);
hw->frame_done = 1;
return;
}
static void release_cur_decoding_buf(struct vdec_h264_hw_s *hw)
{
struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
if (p_H264_Dpb->mVideo.dec_picture) {
release_picture(p_H264_Dpb,
p_H264_Dpb->mVideo.dec_picture);
p_H264_Dpb->mVideo.dec_picture->data_flag &= ~ERROR_FLAG;
p_H264_Dpb->mVideo.dec_picture = NULL;
if (hw->mmu_enable)
hevc_set_frame_done(hw);
}
}
static void hevc_sao_wait_done(struct vdec_h264_hw_s *hw)
{
ulong timeout = jiffies + HZ;
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL, "hevc_sao_wait_done...start\n");
while ((READ_VREG(HEVC_SAO_INT_STATUS) >> 31)) {
if (time_after(jiffies, timeout)) {
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL,
"hevc_sao_wait_done...wait timeout!\n");
break;
}
}
timeout = jiffies + HZ;
if ((hw->frame_busy == 1) && (hw->frame_done == 1) ) {
if (get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(SYS_COMMAND, H265_ABORT_SAO_4K_SET);
while ((READ_VREG(SYS_COMMAND) & 0xff) !=
H265_ABORT_SAO_4K_SET_DONE) {
if (time_after(jiffies, timeout)) {
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL,
"wait h265_abort_sao_4k_set_done timeout!\n");
break;
}
}
}
amhevc_stop();
hw->frame_busy = 0;
hw->frame_done = 0;
dpb_print(DECODE_ID(hw),
PRINT_FLAG_MMU_DETAIL,
"sao wait done ,hevc stop!\n");
}
return;
}
static void buf_spec_init(struct vdec_h264_hw_s *hw, bool buffer_reset_flag)
{
int i;
unsigned long flags;
spin_lock_irqsave(&hw->bufspec_lock, flags);
for (i = 0; i < VF_POOL_SIZE; i++) {
struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i];
u32 ref_idx = BUFSPEC_INDEX(vf->index);
if ((vf->index != -1) &&
(hw->buffer_spec[ref_idx].vf_ref == 0) &&
(hw->buffer_spec[ref_idx].used != -1)) {
vf->index = -1;
}
}
hw->cur_pool++;
if (hw->cur_pool >= VF_POOL_NUM)
hw->cur_pool = 0;
for (i = 0; i < VF_POOL_SIZE; i++) {
struct vframe_s *vf = &hw->vfpool[hw->cur_pool][i];
u32 ref_idx = BUFSPEC_INDEX(vf->index);
if ((vf->index != -1) &&
(hw->buffer_spec[ref_idx].vf_ref == 0) &&
(hw->buffer_spec[ref_idx].used != -1)) {
vf->index = -1;
}
}
/* buffers are allocated on error reset; v4l must find the buffer via buffer_wrap[] */
if (hw->reset_bufmgr_flag && buffer_reset_flag) {
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->buffer_spec[i].used == 1 || hw->buffer_spec[i].used == 2)
hw->buffer_spec[i].used = 0;
}
} else {
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
hw->buffer_spec[i].used = -1;
hw->buffer_spec[i].canvas_pos = -1;
hw->buffer_wrap[i] = -1;
}
}
if (dpb_is_debug(DECODE_ID(hw),
PRINT_FLAG_DUMP_BUFSPEC))
dump_bufspec(hw, __func__);
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
}
/*is active in buf management */
static unsigned char is_buf_spec_in_use(struct vdec_h264_hw_s *hw,
int buf_spec_num)
{
unsigned char ret = 0;
if (hw->buffer_spec[buf_spec_num].used == 1 ||
hw->buffer_spec[buf_spec_num].used == 2 ||
hw->buffer_spec[buf_spec_num].used == 3 ||
hw->buffer_spec[buf_spec_num].used == 5)
ret = 1;
return ret;
}
static unsigned char is_buf_spec_in_disp_q(struct vdec_h264_hw_s *hw,
int buf_spec_num)
{
unsigned char ret = 0;
if (hw->buffer_spec[buf_spec_num].used == 2 ||
hw->buffer_spec[buf_spec_num].used == 3 ||
hw->buffer_spec[buf_spec_num].used == 5)
ret = 1;
return ret;
}
static int alloc_one_buf_spec(struct vdec_h264_hw_s *hw, int i)
{
struct vdec_s *vdec = hw_to_vdec(hw);
if (hw->mmu_enable) {
if (hw->buffer_spec[i].alloc_header_addr)
return 0;
else
return -1;
} else {
int buf_size = (hw->mb_total << 8) + (hw->mb_total << 7);
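/* i.e. 384 bytes per 16x16 macroblock: 256 for luma plus 128 for the NV21 chroma plane */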
int addr;
#ifdef VDEC_DW
int orig_buf_size;
orig_buf_size = buf_size;
if (IS_VDEC_DW(hw) == 1)
buf_size += (hw->mb_total << 7) + (hw->mb_total << 6);
else if (IS_VDEC_DW(hw) == 2)
buf_size += (hw->mb_total << 6) + (hw->mb_total << 5);
else if (IS_VDEC_DW(hw) == 4)
buf_size += (hw->mb_total << 4) + (hw->mb_total << 3);
else if (IS_VDEC_DW(hw) == 8)
buf_size += (hw->mb_total << 2) + (hw->mb_total << 1);
if (IS_VDEC_DW(hw)) {
u32 align_size;
/* add align padding size for blk64x32: (mb_w<<4)*32, (mb_h<<4)*64 */
align_size = ((hw->mb_width << 9) + (hw->mb_height << 10)) / IS_VDEC_DW(hw);
/* double align padding size for uv*/
align_size <<= 1;
buf_size += align_size + PAGE_SIZE;
}
#endif
if (hw->buffer_spec[i].cma_alloc_addr)
return 0;
if (decoder_bmmu_box_alloc_buf_phy(hw->bmmu_box, i,
PAGE_ALIGN(buf_size), DRIVER_NAME,
&hw->buffer_spec[i].cma_alloc_addr) < 0) {
hw->buffer_spec[i].cma_alloc_addr = 0;
if (hw->no_mem_count++ > 3) {
hw->stat |= DECODER_FATAL_ERROR_NO_MEM;
hw->reset_bufmgr_flag = 1;
}
dpb_print(DECODE_ID(hw), 0,
"%s, fail to alloc buf for bufspec%d, try later\n",
__func__, i
);
return -1;
} else {
hw->no_mem_count = 0;
hw->stat &= ~DECODER_FATAL_ERROR_NO_MEM;
}
if (!vdec_secure(vdec)) {
/*init internal buf*/
char *tmpbuf = (char *)codec_mm_phys_to_virt(hw->buffer_spec[i].cma_alloc_addr);
if (tmpbuf) {
memset(tmpbuf, 0, PAGE_ALIGN(buf_size));
codec_mm_dma_flush(tmpbuf,
PAGE_ALIGN(buf_size),
DMA_TO_DEVICE);
} else {
tmpbuf = codec_mm_vmap(hw->buffer_spec[i].cma_alloc_addr, PAGE_ALIGN(buf_size));
if (tmpbuf) {
memset(tmpbuf, 0, PAGE_ALIGN(buf_size));
codec_mm_dma_flush(tmpbuf,
PAGE_ALIGN(buf_size),
DMA_TO_DEVICE);
codec_mm_unmap_phyaddr(tmpbuf);
}
}
}
hw->buffer_spec[i].buf_adr =
hw->buffer_spec[i].cma_alloc_addr;
addr = hw->buffer_spec[i].buf_adr;
hw->buffer_spec[i].y_addr = addr;
addr += hw->mb_total << 8;
hw->buffer_spec[i].u_addr = addr;
hw->buffer_spec[i].v_addr = addr;
addr += hw->mb_total << 7;
hw->buffer_spec[i].canvas_config[0].phy_addr =
hw->buffer_spec[i].y_addr;
hw->buffer_spec[i].canvas_config[0].width =
hw->mb_width << 4;
hw->buffer_spec[i].canvas_config[0].height =
hw->mb_height << 4;
hw->buffer_spec[i].canvas_config[0].block_mode =
hw->canvas_mode;
hw->buffer_spec[i].canvas_config[1].phy_addr =
hw->buffer_spec[i].u_addr;
hw->buffer_spec[i].canvas_config[1].width =
hw->mb_width << 4;
hw->buffer_spec[i].canvas_config[1].height =
hw->mb_height << 3;
hw->buffer_spec[i].canvas_config[1].block_mode =
hw->canvas_mode;
dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
"%s, alloc buf for bufspec%d\n",
__func__, i);
#ifdef VDEC_DW
if (!IS_VDEC_DW(hw))
return 0;
else {
int w_shift = 3, h_shift = 3;
if (IS_VDEC_DW(hw) == 1) {
w_shift = 3;
h_shift = 4;
} else if (IS_VDEC_DW(hw) == 2) {
w_shift = 3;
h_shift = 3;
} else if (IS_VDEC_DW(hw) == 4) {
w_shift = 2;
h_shift = 2;
} else if (IS_VDEC_DW(hw) == 8) {
w_shift = 1;
h_shift = 1;
}
addr = hw->buffer_spec[i].cma_alloc_addr + PAGE_ALIGN(orig_buf_size);
hw->buffer_spec[i].vdec_dw_y_addr = addr;
addr += ALIGN_WIDTH(hw->mb_width << w_shift) * ALIGN_HEIGHT(hw->mb_height << h_shift);
hw->buffer_spec[i].vdec_dw_u_addr = addr;
hw->buffer_spec[i].vdec_dw_v_addr = addr;
addr += hw->mb_total << (w_shift + h_shift - 1);
hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr =
hw->buffer_spec[i].vdec_dw_y_addr;
hw->buffer_spec[i].vdec_dw_canvas_config[0].width =
ALIGN_WIDTH(hw->mb_width << w_shift);
hw->buffer_spec[i].vdec_dw_canvas_config[0].height =
ALIGN_HEIGHT(hw->mb_height << h_shift);
hw->buffer_spec[i].vdec_dw_canvas_config[0].block_mode =
hw->canvas_mode;
hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr =
hw->buffer_spec[i].vdec_dw_u_addr;
hw->buffer_spec[i].vdec_dw_canvas_config[1].width =
ALIGN_WIDTH(hw->mb_width << w_shift);
hw->buffer_spec[i].vdec_dw_canvas_config[1].height =
ALIGN_HEIGHT(hw->mb_height << (h_shift - 1));
hw->buffer_spec[i].vdec_dw_canvas_config[1].block_mode =
hw->canvas_mode;
dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
"%s, vdec_dw: alloc buf for bufspec%d blkmod %d\n",
__func__, i, hw->canvas_mode);
}
#endif
}
return 0;
}
static void vh264_put_video_frame(void *vdec_ctx, struct vframe_s *vf)
{
vh264_vf_put(vf, vdec_ctx);
}
static void vh264_get_video_frame(void *vdec_ctx, struct vframe_s **vf)
{
*vf = vh264_vf_get(vdec_ctx);
}
static struct task_ops_s task_dec_ops = {
.type = TASK_TYPE_DEC,
.get_vframe = vh264_get_video_frame,
.put_vframe = vh264_put_video_frame,
};
static int alloc_one_buf_spec_from_queue(struct vdec_h264_hw_s *hw, int idx)
{
int ret = 0;
struct aml_vcodec_ctx *ctx = NULL;
struct buffer_spec_s *bs = &hw->buffer_spec[idx];
struct canvas_config_s *y_canvas_cfg = NULL;
struct canvas_config_s *c_canvas_cfg = NULL;
struct vdec_v4l2_buffer *fb = NULL;
unsigned int y_addr = 0, c_addr = 0;
if (IS_ERR_OR_NULL(hw->v4l2_ctx)) {
pr_err("the v4l context has err.\n");
return -1;
}
if (bs->cma_alloc_addr)
return 0;
ctx = (struct aml_vcodec_ctx *)(hw->v4l2_ctx);
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), try alloc from v4l queue buf size: %d\n",
ctx->id, __func__,
(hw->mb_total << 8) + (hw->mb_total << 7));
ret = ctx->fb_ops.alloc(&ctx->fb_ops, hw->fb_token, &fb, AML_FB_REQ_DEC);
if (ret < 0) {
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] get fb fail.\n", ctx->id);
return ret;
}
bs->cma_alloc_addr = (unsigned long)fb;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), cma alloc addr: 0x%x, out %d dec %d\n",
ctx->id, __func__, bs->cma_alloc_addr,
ctx->cap_pool.out, ctx->cap_pool.dec);
if (fb->num_planes == 1) {
y_addr = fb->m.mem[0].addr;
c_addr = fb->m.mem[0].addr + fb->m.mem[0].offset;
fb->m.mem[0].bytes_used = fb->m.mem[0].size;
} else if (fb->num_planes == 2) {
y_addr = fb->m.mem[0].addr;
c_addr = fb->m.mem[1].addr;
fb->m.mem[0].bytes_used = fb->m.mem[0].size;
fb->m.mem[1].bytes_used = fb->m.mem[1].size;
}
fb->task->attach(fb->task, &task_dec_ops, hw_to_vdec(hw));
fb->status = FB_ST_DECODER;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), y_addr: %x, size: %u\n",
ctx->id, __func__, y_addr, fb->m.mem[0].size);
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), c_addr: %x, size: %u\n",
ctx->id, __func__, c_addr, fb->m.mem[1].size);
bs->y_addr = y_addr;
bs->u_addr = c_addr;
bs->v_addr = c_addr;
y_canvas_cfg = &bs->canvas_config[0];
c_canvas_cfg = &bs->canvas_config[1];
y_canvas_cfg->phy_addr = y_addr;
y_canvas_cfg->width = hw->mb_width << 4;
y_canvas_cfg->height = hw->mb_height << 4;
y_canvas_cfg->block_mode = hw->canvas_mode;
//fb->m.mem[0].bytes_used = y_canvas_cfg->width * y_canvas_cfg->height;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), y_w: %d, y_h: %d\n", ctx->id, __func__,
y_canvas_cfg->width,y_canvas_cfg->height);
c_canvas_cfg->phy_addr = c_addr;
c_canvas_cfg->width = hw->mb_width << 4;
c_canvas_cfg->height = hw->mb_height << 3;
c_canvas_cfg->block_mode = hw->canvas_mode;
//fb->m.mem[1].bytes_used = c_canvas_cfg->width * c_canvas_cfg->height;
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), c_w: %d, c_h: %d\n", ctx->id, __func__,
c_canvas_cfg->width, c_canvas_cfg->height);
dpb_print(DECODE_ID(hw), PRINT_FLAG_V4L_DETAIL,
"[%d] %s(), alloc buf for bufspec%d\n", ctx->id, __func__, idx);
return ret;
}
static void config_decode_canvas(struct vdec_h264_hw_s *hw, int i)
{
int blkmode = hw->canvas_mode;
int endian = 0;
if (blkmode == CANVAS_BLKMODE_LINEAR) {
if ((h264_debug_flag & IGNORE_PARAM_FROM_CONFIG) == 0)
endian = 7;
else
endian = 0;
}
if (hw->is_used_v4l)
endian = 7;
config_cav_lut_ex(hw->buffer_spec[i].
y_canvas_index,
hw->buffer_spec[i].y_addr,
hw->mb_width << 4,
hw->mb_height << 4,
CANVAS_ADDR_NOWRAP,
blkmode,
endian,
VDEC_1);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32,
(1 << 11) | /* canvas_blk32_wr */
(blkmode << 10) | /* canvas_blk32*/
(1 << 8) | /* canvas_index_wr*/
(hw->buffer_spec[i].y_canvas_index << 0) /* canvas index*/
);
}
config_cav_lut_ex(hw->buffer_spec[i].
u_canvas_index,
hw->buffer_spec[i].u_addr,
hw->mb_width << 4,
hw->mb_height << 3,
CANVAS_ADDR_NOWRAP,
blkmode,
endian,
VDEC_1);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32,
(1 << 11) |
(blkmode << 10) |
(1 << 8) |
(hw->buffer_spec[i].u_canvas_index << 0));
}
WRITE_VREG(ANC0_CANVAS_ADDR + hw->buffer_spec[i].canvas_pos,
spec2canvas(&hw->buffer_spec[i]));
#ifdef VDEC_DW
if (!IS_VDEC_DW(hw))
return;
else {
config_cav_lut_ex(hw->buffer_spec[i].
vdec_dw_y_canvas_index,
hw->buffer_spec[i].vdec_dw_canvas_config[0].phy_addr,
hw->buffer_spec[i].vdec_dw_canvas_config[0].width,
hw->buffer_spec[i].vdec_dw_canvas_config[0].height,
CANVAS_ADDR_NOWRAP,
blkmode,
endian,
VDEC_1);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32,
(1 << 11) |
(blkmode << 10) |
(1 << 8) |
(hw->buffer_spec[i].vdec_dw_y_canvas_index << 0));
}
config_cav_lut_ex(hw->buffer_spec[i].
vdec_dw_u_canvas_index,
hw->buffer_spec[i].vdec_dw_canvas_config[1].phy_addr,
hw->buffer_spec[i].vdec_dw_canvas_config[1].width,
hw->buffer_spec[i].vdec_dw_canvas_config[1].height,
CANVAS_ADDR_NOWRAP,
blkmode,
endian,
VDEC_1);
if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_G12A) {
WRITE_VREG(VDEC_ASSIST_CANVAS_BLK32,
(1 << 11) |
(blkmode << 10) |
(1 << 8) |
(hw->buffer_spec[i].vdec_dw_u_canvas_index << 0));
}
}
#endif
}
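/*
 * Canvas setup for the double-write output of the HEVC core: the canvas
 * width/height are derived from the frame size divided by the
 * double-write ratio, aligned to 32 or 64 bytes wide (depending on
 * canvas_mode) and 32 lines high, then programmed for the dw_y_adr and
 * dw_u_v_adr buffers with 64-bit endian swapping.
 */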
static void config_decode_canvas_ex(struct vdec_h264_hw_s *hw, int i)
{
u32 blkmode = hw->canvas_mode;
int canvas_w;
int canvas_h;
canvas_w = hw->frame_width /
get_double_write_ratio(hw->double_write_mode);
canvas_h = hw->frame_height /
get_double_write_ratio(hw->double_write_mode);
if (hw->canvas_mode == 0)
canvas_w = ALIGN(canvas_w, 32);
else
canvas_w = ALIGN(canvas_w, 64);
canvas_h = ALIGN(canvas_h, 32);
config_cav_lut_ex(hw->buffer_spec[i].
y_canvas_index,
hw->buffer_spec[i].dw_y_adr,
canvas_w,
canvas_h,
CANVAS_ADDR_NOWRAP,
blkmode,
7,
VDEC_HEVC);
config_cav_lut_ex(hw->buffer_spec[i].
u_canvas_index,
hw->buffer_spec[i].dw_u_v_adr,
canvas_w,
canvas_h,
CANVAS_ADDR_NOWRAP,
blkmode,
7,
VDEC_HEVC);
}
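/* Return the first buffer spec with no backing allocation (free for binding), or -1. */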
static int v4l_get_free_buffer_spec(struct vdec_h264_hw_s *hw)
{
int i;
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->buffer_spec[i].cma_alloc_addr == 0)
return i;
}
return -1;
}
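/* Map a v4l2 capture buffer index (buffer_wrap) back to its buffer spec slot, or -1. */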
static int v4l_find_buffer_spec_idx(struct vdec_h264_hw_s *hw, unsigned int v4l_indx)
{
int i;
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->buffer_wrap[i] == v4l_indx)
return i;
}
return -1;
}
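/*
 * Pick a buffer spec for the next decoded picture in v4l mode by walking
 * the capture pool: buffers already owned by the decoder are reused once
 * their used/vf_ref counts drop to zero, while buffers still queued in
 * the m2m framework are bound to a free spec on demand (allocation plus
 * canvas setup).  On success the spec is marked used, its fb is moved to
 * FB_ST_DECODER and an aux/SEI buffer is bound; on failure the buffer
 * state is dumped for debugging.
 */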
static int v4l_get_free_buf_idx(struct vdec_s *vdec)
{
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
struct aml_vcodec_ctx * v4l = hw->v4l2_ctx;
struct v4l_buff_pool *pool = &v4l->cap_pool;
struct buffer_spec_s *pic = NULL;
int i, rt, idx = INVALID_IDX;
ulong flags;
u32 state = 0, index;
spin_lock_irqsave(&hw->bufspec_lock, flags);
for (i = 0; i < pool->in; ++i) {
state = (pool->seq[i] >> 16);
index = (pool->seq[i] & 0xffff);
switch (state) {
case V4L_CAP_BUFF_IN_DEC:
rt = v4l_find_buffer_spec_idx(hw, index);
if (rt >= 0) {
pic = &hw->buffer_spec[rt];
if ((pic->vf_ref == 0) &&
(pic->used == 0) &&
pic->cma_alloc_addr) {
idx = rt;
}
}
break;
case V4L_CAP_BUFF_IN_M2M:
rt = v4l_get_free_buffer_spec(hw);
if (rt >= 0) {
pic = &hw->buffer_spec[rt];
if (!alloc_one_buf_spec_from_queue(hw, rt)) {
struct vdec_v4l2_buffer *fb;
config_decode_canvas(hw, rt);
fb = (struct vdec_v4l2_buffer *)pic->cma_alloc_addr;
hw->buffer_wrap[rt] = fb->buf_idx;
idx = rt;
}
}
break;
default:
break;
}
if (idx != INVALID_IDX) {
pic->used = 1;
break;
}
}
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
if (idx < 0) {
dpb_print(DECODE_ID(hw), 0, "%s fail, state %d\n", __func__, state);
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
dpb_print(DECODE_ID(hw), 0, "%s, %d\n",
__func__, hw->buffer_wrap[i]);
}
vmh264_dump_state(vdec);
} else {
struct vdec_v4l2_buffer *fb =
(struct vdec_v4l2_buffer *)pic->cma_alloc_addr;
fb->status = FB_ST_DECODER;
v4l->aux_infos.bind_sei_buffer(v4l, &pic->aux_data_buf,
&pic->aux_data_size, &pic->ctx_buf_idx);
}
return idx;
}
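/*
 * Find a free buffer spec for the next decoded picture.  The v4l path is
 * delegated to v4l_get_free_buf_idx(); otherwise a round-robin scan from
 * start_search_pos (wrapping once) returns the first spec that is
 * unused, unreferenced and has backing memory.
 */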
int get_free_buf_idx(struct vdec_s *vdec)
{
int i;
unsigned long addr, flags;
int index = -1;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
int buf_total = BUFSPEC_POOL_SIZE;
if (hw->is_used_v4l)
return v4l_get_free_buf_idx(vdec);
spin_lock_irqsave(&hw->bufspec_lock, flags);
/*hw->start_search_pos = 0;*/
for (i = hw->start_search_pos; i < buf_total; i++) {
if (hw->mmu_enable)
addr = hw->buffer_spec[i].alloc_header_addr;
else
addr = hw->buffer_spec[i].cma_alloc_addr;
if (hw->buffer_spec[i].vf_ref == 0 &&
hw->buffer_spec[i].used == 0 && addr) {
hw->buffer_spec[i].used = 1;
hw->start_search_pos = i+1;
index = i;
hw->buffer_wrap[i] = index;
break;
}
}
if (index < 0) {
for (i = 0; i < hw->start_search_pos; i++) {
if (hw->mmu_enable)
addr = hw->buffer_spec[i].alloc_header_addr;
else
addr = hw->buffer_spec[i].cma_alloc_addr;
if (hw->buffer_spec[i].vf_ref == 0 &&
hw->buffer_spec[i].used == 0 && addr) {
hw->buffer_spec[i].used = 1;
hw->start_search_pos = i+1;
index = i;
hw->buffer_wrap[i] = index;
break;
}
}
}
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
if (hw->start_search_pos >= buf_total)
hw->start_search_pos = 0;
dpb_print(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL,
"%s, buf_spec_num %d\n", __func__, index);
if (index < 0) {
dpb_print(DECODE_ID(hw), PRINT_FLAG_ERROR,
"%s fail\n", __func__);
vmh264_dump_state(vdec);
}
if (dpb_is_debug(DECODE_ID(hw),
PRINT_FLAG_DUMP_BUFSPEC))
dump_bufspec(hw, __func__);
return index;
}
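/*
 * Release buffer spec @buf_spec_num back to the pool: clear its used
 * flag, free its frame-MMU pages when MMU mode is enabled and drop any
 * attached aux (SEI) data.
 */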
int release_buf_spec_num(struct vdec_s *vdec, int buf_spec_num)
{
/*u32 cur_buf_idx;*/
unsigned long flags;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
dpb_print(DECODE_ID(hw), PRINT_FLAG_MMU_DETAIL,
"%s buf_spec_num %d used %d\n",
__func__, buf_spec_num,
buf_spec_num >= 0 && buf_spec_num < BUFSPEC_POOL_SIZE ? hw->buffer_spec[buf_spec_num].used : 0);
if (buf_spec_num >= 0 &&
buf_spec_num < BUFSPEC_POOL_SIZE
) {
spin_lock_irqsave(&hw->bufspec_lock, flags);
hw->buffer_spec[buf_spec_num].used = 0;
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
if (hw->mmu_enable) {
/*WRITE_VREG(CURR_CANVAS_CTRL, buf_spec_num<<24);
cur_buf_idx = READ_VREG(CURR_CANVAS_CTRL);
cur_buf_idx = cur_buf_idx&0xff;*/
decoder_mmu_box_free_idx(hw->mmu_box, buf_spec_num);
}
release_aux_data(hw, buf_spec_num);
}
if (dpb_is_debug(DECODE_ID(hw),
PRINT_FLAG_DUMP_BUFSPEC))
dump_bufspec(hw, __func__);
return 0;
}
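/*
 * Assign canvas indexes to unused buffer specs until the DPB size is
 * covered.  With parallel_dec each spec draws its canvases from the
 * per-core allocator; otherwise they come from the legacy
 * vdec->get_canvas() pool, doubled when VDEC double-write is active.
 */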
static void config_buf_specs(struct vdec_s *vdec)
{
int i, j;
unsigned long flags;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
int mode = IS_VDEC_DW(hw) ? 2 : 1;
spin_lock_irqsave(&hw->bufspec_lock, flags);
for (i = 0, j = 0;
j < hw->dpb.mDPB.size
&& i < BUFSPEC_POOL_SIZE;
i++) {
int canvas;
if (hw->buffer_spec[i].used != -1)
continue;
if (vdec->parallel_dec == 1) {
if (hw->buffer_spec[i].y_canvas_index == -1)
hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
if (hw->buffer_spec[i].u_canvas_index == -1) {
hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index;
}
#ifdef VDEC_DW
if (IS_VDEC_DW(hw)) {
if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1)
hw->buffer_spec[i].vdec_dw_y_canvas_index =
vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) {
hw->buffer_spec[i].vdec_dw_u_canvas_index =
vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
hw->buffer_spec[i].vdec_dw_v_canvas_index =
hw->buffer_spec[i].vdec_dw_u_canvas_index;
}
}
#endif
} else {
canvas = vdec->get_canvas(j * mode, 2);
hw->buffer_spec[i].y_canvas_index = canvas_y(canvas);
hw->buffer_spec[i].u_canvas_index = canvas_u(canvas);
hw->buffer_spec[i].v_canvas_index = canvas_v(canvas);
dpb_print(DECODE_ID(hw),
PRINT_FLAG_DPB_DETAIL,
"config canvas (%d) %x for bufspec %d\r\n",
j, canvas, i);
#ifdef VDEC_DW
if (IS_VDEC_DW(hw)) {
canvas = vdec->get_canvas(j * mode + 1, 2);
hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas);
hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas);
hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas);
dpb_print(DECODE_ID(hw),
PRINT_FLAG_DPB_DETAIL,
"vdec_dw: config canvas (%d) %x for bufspec %d\r\n",
j, canvas, i);
}
#endif
}
hw->buffer_spec[i].used = 0;
hw->buffer_spec[i].canvas_pos = j;
j++;
}
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
}
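/*
 * Variant of config_buf_specs() that additionally clears
 * alloc_header_addr so the header buffer for each spec will be
 * reallocated.
 */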
static void config_buf_specs_ex(struct vdec_s *vdec)
{
int i, j;
unsigned long flags;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
int mode = IS_VDEC_DW(hw) ? 2 : 1;
spin_lock_irqsave(&hw->bufspec_lock, flags);
for (i = 0, j = 0;
j < hw->dpb.mDPB.size
&& i < BUFSPEC_POOL_SIZE;
i++) {
int canvas = 0;
if (hw->buffer_spec[i].used != -1)
continue;
if (vdec->parallel_dec == 1) {
if (hw->buffer_spec[i].y_canvas_index == -1)
hw->buffer_spec[i].y_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
if (hw->buffer_spec[i].u_canvas_index == -1) {
hw->buffer_spec[i].u_canvas_index = vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
hw->buffer_spec[i].v_canvas_index = hw->buffer_spec[i].u_canvas_index;
}
#ifdef VDEC_DW
if (IS_VDEC_DW(hw)) {
if (hw->buffer_spec[i].vdec_dw_y_canvas_index == -1)
hw->buffer_spec[i].vdec_dw_y_canvas_index =
vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
if (hw->buffer_spec[i].vdec_dw_u_canvas_index == -1) {
hw->buffer_spec[i].vdec_dw_u_canvas_index =
vdec->get_canvas_ex(CORE_MASK_VDEC_1, vdec->id);
hw->buffer_spec[i].vdec_dw_v_canvas_index =
hw->buffer_spec[i].vdec_dw_u_canvas_index;
}
}
#endif
} else {
canvas = vdec->get_canvas(j * mode, 2);
hw->buffer_spec[i].y_canvas_index = canvas_y(canvas);
hw->buffer_spec[i].u_canvas_index = canvas_u(canvas);
hw->buffer_spec[i].v_canvas_index = canvas_v(canvas);
dpb_print(DECODE_ID(hw),
PRINT_FLAG_DPB_DETAIL,
"config canvas (%d) %x for bufspec %d\r\n",
j, canvas, i);
#ifdef VDEC_DW
if (IS_VDEC_DW(hw)) {
canvas = vdec->get_canvas(j * mode + 1, 2);
hw->buffer_spec[i].vdec_dw_y_canvas_index = canvas_y(canvas);
hw->buffer_spec[i].vdec_dw_u_canvas_index = canvas_u(canvas);
hw->buffer_spec[i].vdec_dw_v_canvas_index = canvas_v(canvas);
dpb_print(DECODE_ID(hw),
PRINT_FLAG_DPB_DETAIL,
"vdec_dw: config canvas (%d) %x for bufspec %d\r\n",
j, canvas, i);
}
#endif
}
hw->buffer_spec[i].used = 0;
hw->buffer_spec[i].alloc_header_addr = 0;
hw->buffer_spec[i].canvas_pos = j;
j++;
}
spin_unlock_irqrestore(&hw->bufspec_lock, flags);
}
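/*
 * Free the backing memory of buffer specs that were marked for release
 * (used == 4), or of every spec when @release_all is set.  Non-MMU
 * buffers are returned to the BMMU box (v4l buffers are owned by the
 * v4l2 framework and are only unlinked here); in MMU mode the header
 * allocation is released instead.  Aux (SEI) data is dropped either way.
 */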
static void dealloc_buf_specs(struct vdec_h264_hw_s *hw,
unsigned char release_all)
{
int i;
unsigned long flags;
unsigned char dealloc_flag = 0;
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->buffer_spec[i].used == 4 ||
release_all) {
dealloc_flag = 1;
dpb_print(DECODE_ID(hw),
PRINT_FLAG_DPB_DETAIL,
"%s buf_spec_num %d\n",
__func__, i
);
spin_lock_irqsave
(&hw->bufspec_lock, flags);
hw->buffer_spec[i].used = -1;
spin_unlock_irqrestore
(&hw->bufspec_lock, flags);
release_aux_data(hw, i);
if (!hw->mmu_enable) {
if (hw->buffer_spec[i].cma_alloc_addr) {
if (!hw->is_used_v4l) {
decoder_bmmu_box_free_idx(
hw->bmmu_box,
i);
}
spin_lock_irqsave
(&hw->bufspec_lock, flags);
hw->buffer_spec[i].cma_alloc_addr = 0;
hw->buffer_spec[i].buf_adr = 0;
spin_unlock_irqrestore
(&hw->bufspec_lock, flags);
}
} else {
if (hw->buffer_spec[i].alloc_header_addr) {
decoder_mmu_box_free_idx(
hw->mmu_box,
i);
spin_lock_irqsave
(&hw->bufspec_lock, flags);
hw->buffer_spec[i].
alloc_header_addr = 0;
hw->buffer_spec[i].buf_adr = 0;
spin_unlock_irqrestore
(&hw->bufspec_lock, flags);
}
}
}
}
if (dealloc_flag &&
dpb_is_debug(DECODE_ID(hw),
PRINT_FLAG_DUMP_BUFSPEC))
dump_bufspec(hw, __func__);
return;
}
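/*
 * Report whether a free output buffer is available.  In v4l mode this
 * counts capture buffers that are idle in the buffer-spec pool, adds
 * those still ready on the m2m destination queue when the fb token
 * query succeeds, and compares the total with run_ready_min_buf_num.
 * On the legacy path it returns immediately if an allocated idle spec
 * exists, otherwise it recycles released specs and tries to allocate a
 * new one, bounded by max_alloc_buf_count.
 */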
unsigned char have_free_buf_spec(struct vdec_s *vdec)
{
int i;
unsigned long addr;
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
struct aml_vcodec_ctx * ctx = hw->v4l2_ctx;
int canvas_pos_min = BUFSPEC_POOL_SIZE;
int index = -1;
int ret = 0;
int allocated_count = 0;
if (hw->is_used_v4l) {
struct h264_dpb_stru *dpb = &hw->dpb;
int free_count = 0;
int used_count = 0;
/* trigger to parse head data. */
if (!hw->v4l_params_parsed)
return 1;
if (dpb->mDPB.used_size >= dpb->mDPB.size - 1)
return 0;
if (ctx->cap_pool.dec < hw->dpb.mDPB.size) {
if (ctx->fb_ops.query(&ctx->fb_ops, &hw->fb_token)) {
free_count =
v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) + 1;
}
}
for (i = 0; i < hw->dpb.mDPB.size; i++) {
if (hw->buffer_spec[i].used == 0 &&
hw->buffer_spec[i].vf_ref == 0 &&
hw->buffer_spec[i].cma_alloc_addr) {
free_count++;
} else if (hw->buffer_spec[i].cma_alloc_addr)
used_count++;
}
ATRACE_COUNTER("V_ST_DEC-free_buff_count", free_count);
ATRACE_COUNTER("V_ST_DEC-used_buff_count", used_count);
return free_count >= run_ready_min_buf_num ? 1 : 0;
}
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->mmu_enable)
addr = hw->buffer_spec[i].alloc_header_addr;
else
addr = hw->buffer_spec[i].cma_alloc_addr;
if (hw->buffer_spec[i].used == 0 &&
hw->buffer_spec[i].vf_ref == 0) {
if (addr)
return 1;
if (hw->buffer_spec[i].canvas_pos < canvas_pos_min) {
canvas_pos_min = hw->buffer_spec[i].canvas_pos;
index = i;
}
}
if (addr)
allocated_count++;
}
if (index >= 0) {
mutex_lock(&vmh264_mutex);
dealloc_buf_specs(hw, 0);
if (max_alloc_buf_count == 0 ||
allocated_count < max_alloc_buf_count) {
if (alloc_one_buf_spec(hw, index) >= 0)
ret = 1;
}
mutex_unlock(&vmh264_mutex);
}
return ret;
}
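/*
 * Return the buffer spec occupying canvas position @canvas_pos, i.e. the
 * canvas_pos-th spec that has a canvas assigned, or -1 if none matches.
 */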
static int get_buf_spec_by_canvas_pos(struct vdec_h264_hw_s *hw,
int canvas_pos)
{
int i;
int j = 0;
for (i = 0; i < BUFSPEC_POOL_SIZE; i++) {
if (hw->buffer_spec[i].canvas_pos >= 0) {
if (j == canvas_pos)
return i;
j++;
}
}
return -1;
}
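/*
 * Attach the memory handles backing this vframe so downstream consumers
 * can keep the buffer alive: scatter (MMU) frames expose the MMU pages
 * plus the BMMU header block, linear frames expose the BMMU data block.
 */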
static void update_vf_memhandle(struct vdec_h264_hw_s *hw,
struct vframe_s *vf, int index)
{
if (index < 0) {
vf->mem_handle = NULL;
vf->mem_head_handle = NULL;
} else if (vf->type & VIDTYPE_SCATTER) {
vf->mem_handle =
decoder_mmu_box_get_mem_handle(
hw->mmu_box, index);
vf->mem_head_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box, HEADER_BUFFER_IDX(index));
} else {
vf->mem_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box, VF_BUFFER_IDX(index));
/* vf->mem_head_handle =
decoder_bmmu_box_get_mem_handle(
hw->bmmu_box, HEADER_BUFFER_IDX(index));*/
}
return;
}
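/*
 * Decide whether a decoded frame must be output as two interlaced
 * fields.  Only applies to the legacy non-secure, non-v4l path, driven
 * by the dec_control flags: 1920x1080 streams at 29.97 fps (or 25 fps
 * with profile 100 / level 4.0) and 720x576 streams at 25 fps that are
 * actually interlaced despite being coded as progressive.
 */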
static int check_force_interlace(struct vdec_h264_hw_s *hw,
struct FrameStore *frame)
{
struct h264_dpb_stru *p_H264_Dpb = &hw->dpb;
int bForceInterlace = 0;
/* no di in secure mode, disable force di */
if (vdec_secure(hw_to_vdec(hw)))
return 0;
if (hw->is_used_v4l)
return 0;
if ((dec_control & DEC_CONTROL_FLAG_FORCE_2997_1080P_INTERLACE)
&& hw->bitstream_restriction_flag
&& (hw->frame_width == 1920)
&& (hw->frame_height >= 1080) /* stay compatible with fake progressive streams that are actually interlaced */
&& (hw->frame_dur == 3203 || (hw->frame_dur == 3840 && p_H264_Dpb->mSPS.profile_idc == 100 &&
p_H264_Dpb->mSPS.level_idc == 40))) {
bForceInterlace = 1;
} else if ((dec_control & DEC_CONTROL_FLAG_FORCE_2500_576P_INTERLACE)
&& (hw->frame_width == 720)
&& (hw->frame_height == 576)
&& (hw->frame_dur == 3840)) {
bForceInterlace = 1;
}
/* note: unreachable in v4l mode since hw->is_used_v4l already returned 0 above */
if (hw->is_used_v4l && (bForceInterlace == 0) && frame->frame) {
bForceInterlace = frame->frame->mb_aff_frame_flag ? 1 : 0;
}
return bForceInterlace;
}
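/*
 * Copy per-frame QoS statistics (slice type, frame size, pts, and the
 * min/avg/max of motion vectors, QP and skip ratio) from the FrameStore
 * into hw->vframe_qos for reporting.
 */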
static void fill_frame_info(struct vdec_h264_hw_s *hw, struct FrameStore *frame)
{
struct vframe_qos_s *vframe_qos = &hw->vframe_qos;
if (frame->slice_type == I_SLICE)
vframe_qos->type = 1;
else if (frame->slice_type == P_SLICE)
vframe_qos->type = 2;
else if (frame->slice_type == B_SLICE)
vframe_qos->type = 3;
if (input_frame_based(hw_to_vdec(hw)))
vframe_qos->size = frame->frame_size2;
else
vframe_qos->size = frame->frame_size;
vframe_qos->pts = frame->pts64;
vframe_qos->max_mv = frame->max_mv;
vframe_qos->avg_mv = frame->avg_mv;
vframe_qos->min_mv = frame->min_mv;
/*
pr_info("mv: max:%d, avg:%d, min:%d\n",
vframe_qos->max_mv,
vframe_qos->avg_mv,
vframe_qos->min_mv);
*/
vframe_qos->max_qp = frame->max_qp;
vframe_qos->avg_qp = frame->avg_qp;
vframe_qos->min_qp = frame->min_qp;
/*
pr_info("qp: max:%d, avg:%d, min:%d\n",
vframe_qos->max_qp,
vframe_qos->avg_qp,
vframe_qos->min_qp);
*/
vframe_qos->max_skip = frame->max_skip;
vframe_qos->avg_skip = frame->avg_skip;
vframe_qos->min_skip = frame->min_skip;
/*
pr_info("skip: max:%d, avg:%d, min:%d\n",
vframe_qos->max_skip,
vframe_qos->avg_skip,
vframe_qos->min_skip);
*/
vframe_qos->num++;
}
static int is_iframe(struct FrameStore *frame)
{
if (frame->frame && frame->frame->slice_type == I_SLICE) {
return 1;
}
return 0;
}
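/*
 * Pre-output processing for a FrameStore leaving the DPB: validate the
 * buffer spec, apply the error-skip policy, look up or interpolate the
 * PTS for stream-based input, refine frame_dur from I-frame PTS spacing
 * when needed, update concealment/drop statistics, and finally set
 * frame->show_frame to decide whether the frame is displayed or dropped.
 */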
static int post_prepare_process(struct vdec_s *vdec, struct FrameStore *frame)
{
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
int buffer_index = frame->buf_spec_num;
if (buffer_index < 0 || buffer_index >= BUFSPEC_POOL_SIZE) {
dpb_print(DECODE_ID(hw), 0,
"%s, buffer_index 0x%x is beyond range\n",
__func__, buffer_index);
return -1;
}
if (force_disp_bufspec_num & 0x100) {
/*recycle directly*/
if (hw->buffer_spec[frame->buf_spec_num].used != 3 &&
hw->buffer_spec[frame->buf_spec_num].used != 5)
set_frame_output_flag(&hw->dpb, frame->index);
/*make pre_output not set*/
return -1;
}
if (hw->error_proc_policy & 0x1000) {
int error_skip_i_count = (error_skip_count >> 12) & 0xf;
int error_skip_frame_count = error_skip_count & 0xfff;
if (((hw->no_error_count < error_skip_frame_count)
&& (error_skip_i_count == 0 ||
hw->no_error_i_count < error_skip_i_count))
&& (!(frame->data_flag & I_FLAG)))
frame->data_flag |= ERROR_FLAG;
}
dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG,
"%s, buffer_index 0x%x frame_error %x poc %d hw error %x hw error_proc_policy %x\n",
__func__, buffer_index,
frame->data_flag & ERROR_FLAG,
frame->poc, hw->data_flag & ERROR_FLAG,
hw->error_proc_policy);
if (frame->frame == NULL &&
((frame->is_used == 1 && frame->top_field)
|| (frame->is_used == 2 && frame->bottom_field))) {
if (hw->i_only) {
if (frame->is_used == 1)
dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
"%s No bottom_field !! frame_num %d used %d\n",
__func__, frame->frame_num, frame->is_used);
if (frame->is_used == 2)
dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
"%s No top_field !! frame_num %d used %d\n",
__func__, frame->frame_num, frame->is_used);
} else {
frame->data_flag |= ERROR_FLAG;
dpb_print(DECODE_ID(hw), PRINT_FLAG_ERRORFLAG_DBG,
"%s Error frame_num %d used %d\n",
__func__, frame->frame_num, frame->is_used);
}
}
if (vdec_stream_based(vdec) && !(frame->data_flag & NODISP_FLAG)) {
if ((vdec->vbuf.no_parser == 0) || (vdec->vbuf.use_ptsserv)) {
if ((pts_lookup_offset_us64(PTS_TYPE_VIDEO,
frame->offset_delimiter, &frame->pts, &frame->frame_size,
0, &frame->pts64) == 0)) {
if ((lookup_check_conut && (atomic_read(&hw->vf_pre_count) > lookup_check_conut) &&
(hw->wrong_frame_count > hw->right_frame_count)) &&
((frame->decoded_frame_size * 2 < frame->frame_size))) {
/* many frames may share a single PTS check-in; interpolate from the previous frame to keep playback smooth */
frame->pts64 = hw->last_pts64 + DUR2PTS(hw->frame_dur);
frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
}
hw->right_frame_count++;
} else {
frame->pts64 = hw->last_pts64 + DUR2PTS(hw->frame_dur);
frame->pts = hw->last_pts + DUR2PTS(hw->frame_dur);
hw->wrong_frame_count++;
}
}
dpb_print(DECODE_ID(hw), PRINT_FLAG_VDEC_STATUS,
"%s error= 0x%x poc = %d offset= 0x%x pts= 0x%x last_pts =0x%x pts64 = %lld last_pts64= %lld duration = %d\n",
__func__, (frame->data_flag & ERROR_FLAG), frame->poc,
frame->offset_delimiter, frame->pts, hw->last_pts,
frame->pts64, hw->last_pts64, hw->frame_dur);
hw->last_pts64 = frame->pts64;
hw->last_pts = frame->pts;
}
/* SWPL-18973 96000/15=6400, less than 15fps check */
if ((!hw->duration_from_pts_done) && (hw->frame_dur > 6400ULL)) {
if ((check_force_interlace(hw, frame)) &&
(frame->slice_type == I_SLICE) &&
(hw->pts_outside)) {
if ((!hw->h264_pts_count) || (!hw->h264pts1)) {
hw->h264pts1 = frame->pts;
hw->h264_pts_count = 0;
} else if (frame->pts > hw->h264pts1) {
u32 calc_dur =
PTS2DUR(frame->pts - hw->h264pts1);
calc_dur = ((calc_dur / hw->h264_pts_count) << 1);
if (hw->frame_dur < (calc_dur + 200) &&
hw->frame_dur > (calc_dur - 200)) {
hw->frame_dur >>= 1;
vdec_schedule_work(&hw->notify_work);
dpb_print(DECODE_ID(hw), 0,
"correct frame_dur %d, calc_dur %d, count %d\n",
hw->frame_dur, (calc_dur >> 1), hw->h264_pts_count);
hw->duration_from_pts_done = 1;
hw->h264_pts_count = 0;
}
}
}
hw->h264_pts_count++;
}
if (frame->data_flag & ERROR_FLAG) {
vdec_count_info(&hw->gvs, 1, 0);
if (frame->slice_type == I_SLICE) {
hw->gvs.i_concealed_frames++;
} else if (frame->slice_type == P_SLICE) {
hw->gvs.p_concealed_frames++;
} else if (frame->slice_type == B_SLICE) {
hw->gvs.b_concealed_frames++;
}
if (!hw->send_error_frame_flag) {
hw->gvs.drop_frame_count++;
if (frame->slice_type == I_SLICE) {
hw->gvs.i_lost_frames++;
} else if (frame->slice_type == P_SLICE) {
hw->gvs.p_lost_frames++;
} else if (frame->slice_type == B_SLICE) {
hw->gvs.b_lost_frames++;
}
}
}
if ((!hw->enable_fence) &&
((frame->data_flag & NODISP_FLAG) ||
(frame->data_flag & NULL_FLAG) ||
((!hw->send_error_frame_flag) &&
(frame->data_flag & ERROR_FLAG)) ||
((hw->i_only & 0x1) &&
(!(frame->data_flag & I_FLAG))))) {
frame->show_frame = false;
return 0;
}
if (dpb_is_debug(DECODE_ID(hw), PRINT_FLAG_DPB_DETAIL)) {
dpb_print(DECODE_ID(hw), 0,
"%s, fs[%d] poc %d, buf_spec_num %d\n",
__func__, frame->index, frame->poc,
frame->buf_spec_num);
print_pic_info(DECODE_ID(hw), "predis_frm",
frame->frame, -1);
print_pic_info(DECODE_ID(hw), "predis_top",
frame->top_field, -1);
print_pic_info(DECODE_ID(hw), "predis_bot",
frame->bottom_field, -1);
}
frame->show_frame = true;
return 0;
}
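/*
 * Build the vframe(s) for a FrameStore and hand them to the output
 * path: select NV12 vs NV21 chroma ordering for v4l capture buffers and
 * emit one vframe for progressive frames or two field vframes when the
 * frame is interlaced; check_force_interlace() is consulted for the
 * legacy forced-interlace cases.
 */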
static int post_video_frame(struct vdec_s *vdec, struct FrameStore *frame)
{
struct vdec_h264_hw_s *hw = (struct vdec_h264_hw_s *)vdec->private;
struct vframe_s *vf = NULL;
int buffer_index = frame->buf_spec_num;
struct aml_vcodec_ctx * v4l2_ctx = hw->v4l2_ctx;
struct vdec_v4l2_buffer *fb = NULL;
ulong nv_order = VIDTYPE_VIU_NV21;
int bForceInterlace = 0;
int vf_count = 1;
int i;
/* swap uv */
if (hw->is_used_v4l) {
if ((v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12) ||
(v4l2_ctx->cap_pix_fmt == V4L2_PIX_FMT_NV12M))
nv_order = VIDTYPE_VIU_NV12;
}
if (!is_interlace(frame))
vf_count = 1;
else
vf_count = 2;
bForceInterlace = check_force_interlace(hw, frame);