blob: e6a47fd2fae2c27ce847a1e499217293ffe63767 [file] [log] [blame]
/*
* drivers/amlogic/amports/vvp9.c
*
* Copyright (C) 2015 Amlogic, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#define DEBUG
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/amlogic/media/vfm/vframe.h>
#include <linux/amlogic/media/utils/amstream.h>
#include <linux/amlogic/media/utils/vformat.h>
#include <linux/amlogic/media/frame_sync/ptsserv.h>
#include <linux/amlogic/media/canvas/canvas.h>
#include <linux/amlogic/media/vfm/vframe_provider.h>
#include <linux/amlogic/media/vfm/vframe_receiver.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/slab.h>
//#include <linux/amlogic/tee.h>
#include <uapi/linux/tee.h>
#include <linux/sched/clock.h>
#include "../../../stream_input/amports/amports_priv.h"
#include <linux/amlogic/media/codec_mm/codec_mm.h>
#include "../utils/decoder_mmu_box.h"
#include "../utils/decoder_bmmu_box.h"
#define MEM_NAME "codec_vp9"
#include <linux/amlogic/media/utils/vdec_reg.h>
#include "../utils/vdec.h"
#include "../utils/amvdec.h"
#ifdef CONFIG_AMLOGIC_MEDIA_MULTI_DEC
#include "../utils/vdec_profile.h"
#endif
#include <linux/amlogic/media/video_sink/video.h>
#include <linux/amlogic/media/codec_mm/configs.h>
#include "../utils/config_parser.h"
#include "../utils/firmware.h"
#include "../../../common/chips/decoder_cpu_ver_info.h"
#include "../utils/vdec_v4l2_buffer_ops.h"
#include <media/v4l2-mem2mem.h>
#include "../utils/vdec_feature.h"
#define MIX_STREAM_SUPPORT
#include "vvp9.h"
#define VP9_10B_MMU_DW
/*#define SUPPORT_FB_DECODING*/
/*#define FB_DECODING_TEST_SCHEDULE*/
#define CO_MV_COMPRESS
#define HW_MASK_FRONT 0x1
#define HW_MASK_BACK 0x2
#define VP9D_MPP_REFINFO_TBL_ACCCONFIG 0x3442
#define VP9D_MPP_REFINFO_DATA 0x3443
#define VP9D_MPP_REF_SCALE_ENBL 0x3441
#define HEVC_MPRED_CTRL4 0x324c
#define HEVC_CM_HEADER_START_ADDR 0x3628
#define HEVC_DBLK_CFGB 0x350b
#define HEVCD_MPP_ANC2AXI_TBL_DATA 0x3464
#define HEVC_SAO_MMU_VH1_ADDR 0x363b
#define HEVC_SAO_MMU_VH0_ADDR 0x363a
#define HEVC_SAO_MMU_VH0_ADDR2 0x364d
#define HEVC_SAO_MMU_VH1_ADDR2 0x364e
#define HEVC_MV_INFO 0x310d
#define HEVC_QP_INFO 0x3137
#define HEVC_SKIP_INFO 0x3136
#define HEVC_SAO_CTRL9 0x362d
#define HEVC_CM_HEADER_START_ADDR2 0x364a
#define HEVC_SAO_MMU_DMA_CTRL2 0x364c
#define HEVC_SAO_MMU_VH0_ADDR2 0x364d
#define HEVC_SAO_MMU_VH1_ADDR2 0x364e
#define HEVC_SAO_MMU_STATUS2 0x3650
#define HEVC_DW_VH0_ADDDR 0x365e
#define HEVC_DW_VH1_ADDDR 0x365f
#define VP9_10B_DEC_IDLE 0
#define VP9_10B_DEC_FRAME_HEADER 1
#define VP9_10B_DEC_SLICE_SEGMENT 2
#define VP9_10B_DECODE_SLICE 5
#define VP9_10B_DISCARD_NAL 6
#define VP9_DUMP_LMEM 7
#define HEVC_DECPIC_DATA_DONE 0xa
#define HEVC_DECPIC_DATA_ERROR 0xb
#define HEVC_NAL_DECODE_DONE 0xe
#define HEVC_DECODE_BUFEMPTY 0x20
#define HEVC_DECODE_TIMEOUT 0x21
#define HEVC_SEARCH_BUFEMPTY 0x22
#define HEVC_DECODE_OVER_SIZE 0x23
#define HEVC_S2_DECODING_DONE 0x50
#define VP9_HEAD_PARSER_DONE 0xf0
#define VP9_HEAD_SEARCH_DONE 0xf1
#define VP9_EOS 0xf2
#define HEVC_ACTION_DONE 0xff
#define VF_POOL_SIZE 32
#undef pr_info
#define pr_info printk
#define DECODE_MODE_SINGLE ((0x80 << 24) | 0)
#define DECODE_MODE_MULTI_STREAMBASE ((0x80 << 24) | 1)
#define DECODE_MODE_MULTI_FRAMEBASE ((0x80 << 24) | 2)
#define DECODE_MODE_SINGLE_LOW_LATENCY ((0x80 << 24) | 3)
#define DECODE_MODE_MULTI_FRAMEBASE_NOHEAD ((0x80 << 24) | 4)
#define VP9_TRIGGER_FRAME_DONE 0x100
#define VP9_TRIGGER_FRAME_ENABLE 0x200
#define MV_MEM_UNIT 0x240
/*---------------------------------------------------
* Include "parser_cmd.h"
*---------------------------------------------------
*/
#define PARSER_CMD_SKIP_CFG_0 0x0000090b
#define PARSER_CMD_SKIP_CFG_1 0x1b14140f
#define PARSER_CMD_SKIP_CFG_2 0x001b1910
#define PARSER_CMD_NUMBER 37
/*#define HEVC_PIC_STRUCT_SUPPORT*/
/* to remove, fix build error */
/*#define CODEC_MM_FLAGS_FOR_VDECODER 0*/
#define MULTI_INSTANCE_SUPPORT
#define SUPPORT_10BIT
/* #define ERROR_HANDLE_DEBUG */
#ifndef STAT_KTHREAD
#define STAT_KTHREAD 0x40
#endif
#ifdef MULTI_INSTANCE_SUPPORT
#define MAX_DECODE_INSTANCE_NUM 9
#define MULTI_DRIVER_NAME "ammvdec_vp9"
static unsigned int max_decode_instance_num
= MAX_DECODE_INSTANCE_NUM;
static unsigned int decode_frame_count[MAX_DECODE_INSTANCE_NUM];
static unsigned int display_frame_count[MAX_DECODE_INSTANCE_NUM];
static unsigned int max_process_time[MAX_DECODE_INSTANCE_NUM];
static unsigned int run_count[MAX_DECODE_INSTANCE_NUM];
static unsigned int input_empty[MAX_DECODE_INSTANCE_NUM];
static unsigned int not_run_ready[MAX_DECODE_INSTANCE_NUM];
static u32 decode_timeout_val = 200;
static int start_decode_buf_level = 0x8000;
static u32 work_buf_size;
static u32 force_pts_unstable;
static u32 mv_buf_margin;
static u32 mv_buf_dynamic_alloc;
/* DOUBLE_WRITE_MODE is enabled only when NV21 8 bit output is needed */
/* double_write_mode:
* 0, no double write;
* 1, 1:1 ratio;
* 2, (1/4):(1/4) ratio;
* 3, (1/4):(1/4) ratio, with both compressed frame included
* 4, (1/2):(1/2) ratio;
* 8, (1/8):(1/8) ratio;
* 0x10, double write only
* 0x100, if > 1080p,use mode 4,else use mode 1;
* 0x200, if > 1080p,use mode 2,else use mode 1;
* 0x300, if > 720p, use mode 4, else use mode 1;
*/
static u32 double_write_mode;
#define DRIVER_NAME "amvdec_vp9"
#define DRIVER_HEADER_NAME "amvdec_vp9_header"
#define PUT_INTERVAL (HZ/100)
#define ERROR_SYSTEM_RESET_COUNT 200
#define PTS_NORMAL 0
#define PTS_NONE_REF_USE_DURATION 1
#define PTS_MODE_SWITCHING_THRESHOLD 3
#define PTS_MODE_SWITCHING_RECOVERY_THREASHOLD 3
#define DUR2PTS(x) ((x)*90/96)
struct VP9Decoder_s;
static int vvp9_vf_states(struct vframe_states *states, void *);
static struct vframe_s *vvp9_vf_peek(void *);
static struct vframe_s *vvp9_vf_get(void *);
static void vvp9_vf_put(struct vframe_s *, void *);
static int vvp9_event_cb(int type, void *data, void *private_data);
static int vvp9_stop(struct VP9Decoder_s *pbi);
#ifdef MULTI_INSTANCE_SUPPORT
static s32 vvp9_init(struct vdec_s *vdec);
#else
static s32 vvp9_init(struct VP9Decoder_s *pbi);
#endif
static void vvp9_prot_init(struct VP9Decoder_s *pbi, u32 mask);
static int vvp9_local_init(struct VP9Decoder_s *pbi);
static void vvp9_put_timer_func(struct timer_list *timer);
static void dump_data(struct VP9Decoder_s *pbi, int size);
static unsigned char get_data_check_sum
(struct VP9Decoder_s *pbi, int size);
static void dump_pic_list(struct VP9Decoder_s *pbi);
static int vp9_alloc_mmu(
struct VP9Decoder_s *pbi,
int cur_buf_idx,
int pic_width,
int pic_height,
unsigned short bit_depth,
unsigned int *mmu_index_adr);
#ifdef VP9_10B_MMU_DW
int vp9_alloc_mmu_dw(
struct VP9Decoder_s *pbi,
int cur_buf_idx,
int pic_width,
int pic_height,
unsigned short bit_depth,
unsigned int *mmu_index_adr);
#endif
static const char vvp9_dec_id[] = "vvp9-dev";
#define PROVIDER_NAME "decoder.vp9"
#define MULTI_INSTANCE_PROVIDER_NAME "vdec.vp9"
static const struct vframe_operations_s vvp9_vf_provider = {
.peek = vvp9_vf_peek,
.get = vvp9_vf_get,
.put = vvp9_vf_put,
.event_cb = vvp9_event_cb,
.vf_states = vvp9_vf_states,
};
static struct vframe_provider_s vvp9_vf_prov;
static u32 bit_depth_luma;
static u32 bit_depth_chroma;
static u32 frame_width;
static u32 frame_height;
static u32 video_signal_type;
static u32 on_no_keyframe_skiped;
#define PROB_SIZE (496 * 2 * 4)
#define PROB_BUF_SIZE (0x5000)
#define COUNT_BUF_SIZE (0x300 * 4 * 4)
/*compute_losless_comp_body_size(4096, 2304, 1) = 18874368(0x1200000)*/
#define MAX_FRAME_4K_NUM 0x1200
#define MAX_FRAME_8K_NUM 0x4800
#define HEVC_ASSIST_MMU_MAP_ADDR 0x3009
// bit[31:20] -- fb_read_lcu_y
// READ only// bit[19:8] -- fb_read_lcu_x
// READ only// bit[7] -- fb_read_lcu_latch
// bit[6:5] -- reserved
// bit[4] -- fb_disable_wr_iqit_buf
// bit[3] -- fb_read_avs2_enable
// bit[2] -- fb_read_vp9_enable
// bit[1] -- fb_avs2_enable
// bit[0] -- fb_vp9_enable
#define HEVC_ASSIST_HED_FB_CTL 0x300c
// [31:16] height// [15:0] width
#define HEVC_ASSIST_PIC_SIZE_FB_READ 0x300d
#define HEVC_ASSIST_MMU_MAP_ADDR2 0x300e
#ifdef SUPPORT_FB_DECODING
/* register define */
#define HEVC_ASSIST_HED_FB_W_CTL 0x3006
#define HEVC_ASSIST_HED_FB_R_CTL 0x3007
#define HEVC_ASSIST_HED_FB_ADDR 0x3008
#define HEVC_ASSIST_FB_MMU_MAP_ADDR 0x300a
#define HEVC_ASSIST_FBD_MMU_MAP_ADDR 0x300b
#define MAX_STAGE_PAGE_NUM 0x1200
#define STAGE_MMU_MAP_SIZE (MAX_STAGE_PAGE_NUM * 4)
#endif
static inline int div_r32(int64_t m, int n)
{
	/*
	 * Truncated integer quotient (int)(m / n).
	 * 32-bit ARM has no native 64-bit divide, so route through the
	 * kernel's div_s64() helper there; ARM64 can divide directly.
	 */
#ifdef CONFIG_ARM64
	return (int)(m / n);
#else
	int64_t quotient = div_s64(m, n);

	return (int)quotient;
#endif
}
/*USE_BUF_BLOCK*/
struct BUF_s {
int index;
unsigned int alloc_flag;
/*buffer */
unsigned int cma_page_count;
unsigned long alloc_addr;
unsigned long start_adr;
unsigned int size;
unsigned int free_start_adr;
ulong v4l_ref_buf_addr;
ulong header_addr;
u32 header_size;
u32 luma_size;
ulong chroma_addr;
u32 chroma_size;
} /*BUF_t */;
struct MVBUF_s {
unsigned long start_adr;
unsigned int size;
int used_flag;
} /*MVBUF_t */;
/* #undef BUFMGR_ONLY to enable hardware configuration */
/*#define TEST_WR_PTR_INC*/
/*#define WR_PTR_INC_NUM 128*/
#define WR_PTR_INC_NUM 1
#define SIMULATION
#define DOS_PROJECT
#undef MEMORY_MAP_IN_REAL_CHIP
/*#undef DOS_PROJECT*/
/*#define MEMORY_MAP_IN_REAL_CHIP*/
/*#define BUFFER_MGR_ONLY*/
/*#define CONFIG_HEVC_CLK_FORCED_ON*/
/*#define ENABLE_SWAP_TEST*/
#define MCRCC_ENABLE
#define VP9_LPF_LVL_UPDATE
/*#define DBG_LF_PRINT*/
#ifdef VP9_10B_NV21
#else
#define LOSLESS_COMPRESS_MODE
#endif
#define DOUBLE_WRITE_YSTART_TEMP 0x02000000
#define DOUBLE_WRITE_CSTART_TEMP 0x02900000
#define VP9_DEBUG_BUFMGR 0x01
#define VP9_DEBUG_BUFMGR_MORE 0x02
#define VP9_DEBUG_BUFMGR_DETAIL 0x04
#define VP9_DEBUG_OUT_PTS 0x10
#define VP9_DEBUG_SEND_PARAM_WITH_REG 0x100
#define VP9_DEBUG_MERGE 0x200
#define VP9_DEBUG_DBG_LF_PRINT 0x400
#define VP9_DEBUG_REG 0x800
#define VP9_DEBUG_2_STAGE 0x1000
#define VP9_DEBUG_2_STAGE_MORE 0x2000
#define VP9_DEBUG_QOS_INFO 0x4000
#define VP9_DEBUG_DIS_LOC_ERROR_PROC 0x10000
#define VP9_DEBUG_DIS_SYS_ERROR_PROC 0x20000
#define VP9_DEBUG_DUMP_PIC_LIST 0x40000
#define VP9_DEBUG_TRIG_SLICE_SEGMENT_PROC 0x80000
#define VP9_DEBUG_NO_TRIGGER_FRAME 0x100000
#define VP9_DEBUG_LOAD_UCODE_FROM_FILE 0x200000
#define VP9_DEBUG_FORCE_SEND_AGAIN 0x400000
#define VP9_DEBUG_DUMP_DATA 0x800000
#define VP9_DEBUG_CACHE 0x1000000
#define VP9_DEBUG_CACHE_HIT_RATE 0x2000000
#define IGNORE_PARAM_FROM_CONFIG 0x8000000
#ifdef MULTI_INSTANCE_SUPPORT
#define PRINT_FLAG_ERROR 0x0
#define PRINT_FLAG_V4L_DETAIL 0x10000000
#define PRINT_FLAG_VDEC_STATUS 0x20000000
#define PRINT_FLAG_VDEC_DETAIL 0x40000000
#define PRINT_FLAG_VDEC_DATA 0x80000000
#endif
static u32 force_bufspec;
static u32 debug;
static bool is_reset;
/*for debug*/
/*
udebug_flag:
bit 0, enable ucode print
bit 1, enable ucode detail print
bit [31:16] not 0, pos to dump lmem
bit 2, pop bits to lmem
bit [11:8], pre-pop bits for alignment (when bit 2 is 1)
*/
static u32 udebug_flag;
/*
when udebug_flag[1:0] is not 0
udebug_pause_pos not 0,
pause position
*/
static u32 udebug_pause_pos;
/*
when udebug_flag[1:0] is not 0
and udebug_pause_pos is not 0,
pause only when DEBUG_REG2 is equal to this val
*/
static u32 udebug_pause_val;
static u32 udebug_pause_decode_idx;
static u32 without_display_mode;
static u32 v4l_bitstream_id_enable = 1;
/*
*[3:0] 0: default use config from omx.
* 1: force enable fence.
* 2: disable fence.
*[7:4] 0: fence use for driver.
* 1: fence fd use for app.
*/
static u32 force_config_fence;
#define DEBUG_REG
#ifdef DEBUG_REG
void WRITE_VREG_DBG2(unsigned int adr, unsigned int val)
{
	/* Traced register write: log it when VP9_DEBUG_REG is enabled. */
	if (debug & VP9_DEBUG_REG)
		pr_info("%s(%x, %x)\n", __func__, adr, val);

	/* Address 0 acts as a dummy/disabled write and is skipped. */
	if (adr)
		WRITE_VREG(adr, val);
}
#undef WRITE_VREG
#define WRITE_VREG WRITE_VREG_DBG2
#endif
#define FRAME_CNT_WINDOW_SIZE 59
#define RATE_CORRECTION_THRESHOLD 5
/**************************************************
VP9 buffer management start
***************************************************/
#define MMU_COMPRESS_HEADER_SIZE_1080P 0x10000
#define MMU_COMPRESS_HEADER_SIZE_4K 0x48000
#define MMU_COMPRESS_HEADER_SIZE_8K 0x120000
//#define MMU_COMPRESS_HEADER_SIZE 0x48000
//#define MMU_COMPRESS_HEADER_SIZE_DW 0x48000
//#define MMU_COMPRESS_8K_HEADER_SIZE (MMU_COMPRESS_HEADER_SIZE * 4)
#define MMU_COMPRESS_HEADER_SIZE 0x48000
#define MMU_COMPRESS_HEADER_SIZE_DW 0x48000
#define MMU_COMPRESS_8K_HEADER_SIZE (MMU_COMPRESS_HEADER_SIZE * 4)
#define MAX_SIZE_8K (8192 * 4608)
#define MAX_SIZE_4K (4096 * 2304)
#define MAX_SIZE_2K (1920 * 1088)
#define IS_8K_SIZE(w, h) (((w) * (h)) > MAX_SIZE_4K)
#define IS_4K_SIZE(w, h) (((w) * (h)) > (1920*1088))
#define INVALID_IDX -1 /* Invalid buffer index.*/
#define RPM_BEGIN 0x200
#define RPM_END 0x280
union param_u {
struct {
unsigned short data[RPM_END - RPM_BEGIN];
} l;
struct {
/* from ucode lmem, do not change this struct */
unsigned short profile;
unsigned short show_existing_frame;
unsigned short frame_to_show_idx;
unsigned short frame_type; /*1 bit*/
unsigned short show_frame; /*1 bit*/
unsigned short error_resilient_mode; /*1 bit*/
unsigned short intra_only; /*1 bit*/
unsigned short display_size_present; /*1 bit*/
unsigned short reset_frame_context;
unsigned short refresh_frame_flags;
unsigned short width;
unsigned short height;
unsigned short display_width;
unsigned short display_height;
/*
*bit[11:8] - ref_frame_info_0 (ref(3-bits), ref_frame_sign_bias(1-bit))
*bit[7:4] - ref_frame_info_1 (ref(3-bits), ref_frame_sign_bias(1-bit))
*bit[3:0] - ref_frame_info_2 (ref(3-bits), ref_frame_sign_bias(1-bit))
*/
unsigned short ref_info;
/*
*bit[2]: same_frame_size0
*bit[1]: same_frame_size1
*bit[0]: same_frame_size2
*/
unsigned short same_frame_size;
unsigned short mode_ref_delta_enabled;
unsigned short ref_deltas[4];
unsigned short mode_deltas[2];
unsigned short filter_level;
unsigned short sharpness_level;
unsigned short bit_depth;
unsigned short seg_quant_info[8];
unsigned short seg_enabled;
unsigned short seg_abs_delta;
/* bit 15: feature enabled; bit 8, sign; bit[5:0], data */
unsigned short seg_lf_info[8];
} p;
};
struct vpx_codec_frame_buffer_s {
uint8_t *data; /**< Pointer to the data buffer */
size_t size; /**< Size of data in bytes */
void *priv; /**< Frame's private data */
};
enum vpx_color_space_t {
VPX_CS_UNKNOWN = 0, /**< Unknown */
VPX_CS_BT_601 = 1, /**< BT.601 */
VPX_CS_BT_709 = 2, /**< BT.709 */
VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */
VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */
VPX_CS_BT_2020 = 5, /**< BT.2020 */
VPX_CS_RESERVED = 6, /**< Reserved */
VPX_CS_SRGB = 7 /**< sRGB */
}; /**< alias for enum vpx_color_space */
enum vpx_bit_depth_t {
VPX_BITS_8 = 8, /**< 8 bits */
VPX_BITS_10 = 10, /**< 10 bits */
VPX_BITS_12 = 12, /**< 12 bits */
};
#define MAX_SLICE_NUM 1024
struct PIC_BUFFER_CONFIG_s {
int index;
int BUF_index;
int mv_buf_index;
int comp_body_size;
int buf_size;
int vf_ref;
int y_canvas_index;
int uv_canvas_index;
#ifdef MULTI_INSTANCE_SUPPORT
struct canvas_config_s canvas_config[2];
#endif
int decode_idx;
int slice_type;
int stream_offset;
u32 pts;
u64 pts64;
u64 timestamp;
uint8_t error_mark;
/**/
int slice_idx;
/*buffer*/
unsigned long header_adr;
#ifdef VP9_10B_MMU_DW
unsigned long header_dw_adr;
#endif
unsigned long mpred_mv_wr_start_addr;
int mv_size;
/*unsigned long mc_y_adr;
*unsigned long mc_u_v_adr;
*/
unsigned int dw_y_adr;
unsigned int dw_u_v_adr;
u32 luma_size;
u32 chroma_size;
int mc_canvas_y;
int mc_canvas_u_v;
int lcu_total;
/**/
int y_width;
int y_height;
int y_crop_width;
int y_crop_height;
int y_stride;
int uv_width;
int uv_height;
int uv_crop_width;
int uv_crop_height;
int uv_stride;
int alpha_width;
int alpha_height;
int alpha_stride;
uint8_t *y_buffer;
uint8_t *u_buffer;
uint8_t *v_buffer;
uint8_t *alpha_buffer;
uint8_t *buffer_alloc;
int buffer_alloc_sz;
int border;
int frame_size;
int subsampling_x;
int subsampling_y;
unsigned int bit_depth;
enum vpx_color_space_t color_space;
int corrupted;
int flags;
unsigned long cma_alloc_addr;
int double_write_mode;
/* picture qos infomation*/
int max_qp;
int avg_qp;
int min_qp;
int max_skip;
int avg_skip;
int min_skip;
int max_mv;
int min_mv;
int avg_mv;
u32 hw_decode_time;
u32 frame_size2; // For frame base mode
/* vdec sync. */
struct dma_fence *fence;
/* hdr10 plus data */
u32 hdr10p_data_size;
char *hdr10p_data_buf;
} PIC_BUFFER_CONFIG;
enum BITSTREAM_PROFILE {
PROFILE_0,
PROFILE_1,
PROFILE_2,
PROFILE_3,
MAX_PROFILES
};
enum FRAME_TYPE {
KEY_FRAME = 0,
INTER_FRAME = 1,
FRAME_TYPES,
};
enum REFERENCE_MODE {
SINGLE_REFERENCE = 0,
COMPOUND_REFERENCE = 1,
REFERENCE_MODE_SELECT = 2,
REFERENCE_MODES = 3,
};
#define NONE -1
#define INTRA_FRAME 0
#define LAST_FRAME 1
#define GOLDEN_FRAME 2
#define ALTREF_FRAME 3
#define MAX_REF_FRAMES 4
#define REFS_PER_FRAME 3
#define REF_FRAMES_LOG2 3
#define REF_FRAMES (1 << REF_FRAMES_LOG2)
#define REF_FRAMES_4K (6)
/*4 scratch frames for the new frames to support a maximum of 4 cores decoding
 *in parallel, 3 for scaled references on the encoder.
 *TODO(hkuang): Add on-demand frame buffers instead of hardcoding the
 *number of framebuffers.
 *TODO(jkoleszar): These 3 extra references could probably come from the
 *normal reference pool.
 */
#define FRAME_BUFFERS (REF_FRAMES + 16)
#define HEADER_FRAME_BUFFERS (FRAME_BUFFERS)
#define MAX_BUF_NUM (FRAME_BUFFERS)
#define MV_BUFFER_NUM FRAME_BUFFERS
#ifdef SUPPORT_FB_DECODING
#define STAGE_MAX_BUFFERS 16
#else
#define STAGE_MAX_BUFFERS 0
#endif
#define FRAME_CONTEXTS_LOG2 2
#define FRAME_CONTEXTS (1 << FRAME_CONTEXTS_LOG2)
/*buffer + header buffer + workspace*/
#ifdef MV_USE_FIXED_BUF
#define MAX_BMMU_BUFFER_NUM (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + 1)
#define VF_BUFFER_IDX(n) (n)
#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n)
#define WORK_SPACE_BUF_ID (FRAME_BUFFERS + HEADER_FRAME_BUFFERS)
#else
#define MAX_BMMU_BUFFER_NUM \
(FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM + 1)
#define VF_BUFFER_IDX(n) (n)
#define HEADER_BUFFER_IDX(n) (FRAME_BUFFERS + n)
#define MV_BUFFER_IDX(n) (FRAME_BUFFERS + HEADER_FRAME_BUFFERS + n)
#define WORK_SPACE_BUF_ID \
(FRAME_BUFFERS + HEADER_FRAME_BUFFERS + MV_BUFFER_NUM)
#endif
struct RefCntBuffer_s {
int ref_count;
/*MV_REF *mvs;*/
int mi_rows;
int mi_cols;
struct vpx_codec_frame_buffer_s raw_frame_buffer;
struct PIC_BUFFER_CONFIG_s buf;
/*The Following variables will only be used in frame parallel decode.
*
*frame_worker_owner indicates which FrameWorker owns this buffer. NULL means
*that no FrameWorker owns, or is decoding, this buffer.
*VP9Worker *frame_worker_owner;
*
*row and col indicate which position frame has been decoded to in real
*pixel unit. They are reset to -1 when decoding begins and set to INT_MAX
*when the frame is fully decoded.
*/
int row;
int col;
int show_frame;
} RefCntBuffer;
struct RefBuffer_s {
/*TODO(dkovalev): idx is not really required and should be removed, now it
*is used in vp9_onyxd_if.c
*/
int idx;
struct PIC_BUFFER_CONFIG_s *buf;
/*struct scale_factors sf;*/
} RefBuffer;
struct InternalFrameBuffer_s {
uint8_t *data;
size_t size;
int in_use;
} InternalFrameBuffer;
struct InternalFrameBufferList_s {
int num_internal_frame_buffers;
struct InternalFrameBuffer_s *int_fb;
} InternalFrameBufferList;
struct BufferPool_s {
/*Protect BufferPool from being accessed by several FrameWorkers at
*the same time during frame parallel decode.
*TODO(hkuang): Try to use atomic variable instead of locking the whole pool.
*
*Private data associated with the frame buffer callbacks.
*void *cb_priv;
*
*vpx_get_frame_buffer_cb_fn_t get_fb_cb;
*vpx_release_frame_buffer_cb_fn_t release_fb_cb;
*/
struct RefCntBuffer_s frame_bufs[FRAME_BUFFERS];
/*Frame buffers allocated internally by the codec.*/
struct InternalFrameBufferList_s int_frame_buffers;
unsigned long flags;
spinlock_t lock;
} BufferPool;
#define lock_buffer_pool(pool, flags) \
spin_lock_irqsave(&pool->lock, flags)
#define unlock_buffer_pool(pool, flags) \
spin_unlock_irqrestore(&pool->lock, flags)
struct VP9_Common_s {
enum vpx_color_space_t color_space;
int width;
int height;
int display_width;
int display_height;
int last_width;
int last_height;
int subsampling_x;
int subsampling_y;
int use_highbitdepth;/*Marks if we need to use 16bit frame buffers.*/
struct PIC_BUFFER_CONFIG_s *frame_to_show;
struct RefCntBuffer_s *prev_frame;
/*TODO(hkuang): Combine this with cur_buf in macroblockd.*/
struct RefCntBuffer_s *cur_frame;
int ref_frame_map[REF_FRAMES]; /* maps fb_idx to reference slot */
/*Prepare ref_frame_map for the next frame.
*Only used in frame parallel decode.
*/
int next_ref_frame_map[REF_FRAMES];
/* TODO(jkoleszar): could expand active_ref_idx to 4,
*with 0 as intra, and roll new_fb_idx into it.
*/
/*Each frame can reference REFS_PER_FRAME buffers*/
struct RefBuffer_s frame_refs[REFS_PER_FRAME];
int prev_fb_idx;
int new_fb_idx;
int cur_fb_idx_mmu;
/*last frame's frame type for motion search*/
enum FRAME_TYPE last_frame_type;
enum FRAME_TYPE frame_type;
int show_frame;
int last_show_frame;
int show_existing_frame;
/*Flag signaling that the frame is encoded using only INTRA modes.*/
uint8_t intra_only;
uint8_t last_intra_only;
int allow_high_precision_mv;
/*Flag signaling that the frame context should be reset to default
*values. 0 or 1 implies don't reset, 2 reset just the context
*specified in the frame header, 3 reset all contexts.
*/
int reset_frame_context;
/*MBs, mb_rows/cols is in 16-pixel units; mi_rows/cols is in
* MODE_INFO (8-pixel) units.
*/
int MBs;
int mb_rows, mi_rows;
int mb_cols, mi_cols;
int mi_stride;
/*Whether to use previous frame's motion vectors for prediction.*/
int use_prev_frame_mvs;
int refresh_frame_context; /* Two state 0 = NO, 1 = YES */
int ref_frame_sign_bias[MAX_REF_FRAMES]; /* Two state 0, 1 */
/*struct loopfilter lf;*/
/*struct segmentation seg;*/
/*TODO(hkuang):Remove this as it is the same as frame_parallel_decode*/
/* in pbi.*/
int frame_parallel_decode; /* frame-based threading.*/
/*Context probabilities for reference frame prediction*/
/*MV_REFERENCE_FRAME comp_fixed_ref;*/
/*MV_REFERENCE_FRAME comp_var_ref[2];*/
enum REFERENCE_MODE reference_mode;
/*FRAME_CONTEXT *fc; */ /* this frame entropy */
/*FRAME_CONTEXT *frame_contexts; */ /*FRAME_CONTEXTS*/
/*unsigned int frame_context_idx; *//* Context to use/update */
/*FRAME_COUNTS counts;*/
unsigned int current_video_frame;
enum BITSTREAM_PROFILE profile;
enum vpx_bit_depth_t bit_depth;
int error_resilient_mode;
int frame_parallel_decoding_mode;
int byte_alignment;
int skip_loop_filter;
/*External BufferPool passed from outside.*/
struct BufferPool_s *buffer_pool;
int above_context_alloc_cols;
};
static void set_canvas(struct VP9Decoder_s *pbi,
struct PIC_BUFFER_CONFIG_s *pic_config);
static int prepare_display_buf(struct VP9Decoder_s *pbi,
struct PIC_BUFFER_CONFIG_s *pic_config);
static void fill_frame_info(struct VP9Decoder_s *pbi,
struct PIC_BUFFER_CONFIG_s *frame,
unsigned int framesize,
unsigned int pts);
static struct PIC_BUFFER_CONFIG_s *get_frame_new_buffer(struct VP9_Common_s *cm)
{
return &cm->buffer_pool->frame_bufs[cm->new_fb_idx].buf;
}
static void ref_cnt_fb(struct RefCntBuffer_s *bufs, int *idx, int new_idx)
{
	/*
	 * Move the reference held through *idx onto buffer new_idx:
	 * release the old buffer (if any) and take the new one.
	 */
	const int old_index = *idx;

	/* Drop the reference on the previously tracked buffer. */
	if (old_index >= 0 && bufs[old_index].ref_count > 0)
		bufs[old_index].ref_count--;

	/* Record and reference the new buffer. */
	*idx = new_idx;
	bufs[new_idx].ref_count++;
}
int vp9_release_frame_buffer(struct vpx_codec_frame_buffer_s *fb)
{
	/*
	 * Frame-buffer release callback: mark the backing internal
	 * buffer (stashed in fb->priv) as no longer in use.
	 * Always reports success.
	 */
	struct InternalFrameBuffer_s *ifb = fb->priv;

	if (ifb)
		ifb->in_use = 0;

	return 0;
}
static int compute_losless_comp_body_size(int width, int height,
uint8_t is_bit_depth_10);
static void setup_display_size(struct VP9_Common_s *cm, union param_u *params,
	int print_header_info)
{
	/*
	 * Set the display dimensions from the parsed frame header.
	 * They default to the coded size and are overridden only when
	 * the bitstream carries an explicit display size.
	 */
	cm->display_width = cm->width;
	cm->display_height = cm->height;

	if (!params->p.display_size_present) {
		if (print_header_info)
			pr_info(" * 1-bit display_size_present read : 0\n");
		return;
	}

	if (print_header_info)
		pr_info(" * 1-bit display_size_present read : 1\n");
	cm->display_width = params->p.display_width;
	cm->display_height = params->p.display_height;
}
uint8_t print_header_info = 0;
struct buff_s {
u32 buf_start;
u32 buf_size;
u32 buf_end;
} buff_t;
struct BuffInfo_s {
u32 max_width;
u32 max_height;
u32 start_adr;
u32 end_adr;
struct buff_s ipp;
struct buff_s sao_abv;
struct buff_s sao_vb;
struct buff_s short_term_rps;
struct buff_s vps;
struct buff_s sps;
struct buff_s pps;
struct buff_s sao_up;
struct buff_s swap_buf;
struct buff_s swap_buf2;
struct buff_s scalelut;
struct buff_s dblk_para;
struct buff_s dblk_data;
struct buff_s seg_map;
struct buff_s mmu_vbh;
struct buff_s cm_header;
#ifdef VP9_10B_MMU_DW
struct buff_s mmu_vbh_dw;
struct buff_s cm_header_dw;
#endif
struct buff_s mpred_above;
#ifdef MV_USE_FIXED_BUF
struct buff_s mpred_mv;
#endif
struct buff_s rpm;
struct buff_s lmem;
} BuffInfo_t;
#ifdef MULTI_INSTANCE_SUPPORT
#define DEC_RESULT_NONE 0
#define DEC_RESULT_DONE 1
#define DEC_RESULT_AGAIN 2
#define DEC_RESULT_CONFIG_PARAM 3
#define DEC_RESULT_ERROR 4
#define DEC_INIT_PICLIST 5
#define DEC_UNINIT_PICLIST 6
#define DEC_RESULT_GET_DATA 7
#define DEC_RESULT_GET_DATA_RETRY 8
#define DEC_RESULT_EOS 9
#define DEC_RESULT_FORCE_EXIT 10
#define DEC_RESULT_NEED_MORE_BUFFER 11
#define DEC_V4L2_CONTINUE_DECODING 18
#define DEC_S1_RESULT_NONE 0
#define DEC_S1_RESULT_DONE 1
#define DEC_S1_RESULT_FORCE_EXIT 2
#define DEC_S1_RESULT_TEST_TRIGGER_DONE 0xf0
#ifdef FB_DECODING_TEST_SCHEDULE
#define TEST_SET_NONE 0
#define TEST_SET_PIC_DONE 1
#define TEST_SET_S2_DONE 2
#endif
static void vp9_work(struct work_struct *work);
#endif
struct loop_filter_info_n;
struct loopfilter;
struct segmentation;
#ifdef SUPPORT_FB_DECODING
static void mpred_process(struct VP9Decoder_s *pbi);
static void vp9_s1_work(struct work_struct *work);
struct stage_buf_s {
int index;
unsigned short rpm[RPM_END - RPM_BEGIN];
};
static unsigned int not_run2_ready[MAX_DECODE_INSTANCE_NUM];
static unsigned int run2_count[MAX_DECODE_INSTANCE_NUM];
#ifdef FB_DECODING_TEST_SCHEDULE
u32 stage_buf_num; /* = 16;*/
#else
u32 stage_buf_num;
#endif
#endif
struct vp9_fence_vf_t {
u32 used_size;
struct vframe_s *fence_vf[VF_POOL_SIZE];
};
struct VP9Decoder_s {
#ifdef MULTI_INSTANCE_SUPPORT
unsigned char index;
struct device *cma_dev;
struct platform_device *platform_dev;
void (*vdec_cb)(struct vdec_s *, void *);
void *vdec_cb_arg;
struct vframe_chunk_s *chunk;
int dec_result;
struct work_struct work;
struct work_struct recycle_mmu_work;
struct work_struct set_clk_work;
u32 start_shift_bytes;
struct BuffInfo_s work_space_buf_store;
unsigned long buf_start;
u32 buf_size;
u32 cma_alloc_count;
unsigned long cma_alloc_addr;
uint8_t eos;
unsigned long int start_process_time;
unsigned last_lcu_idx;
int decode_timeout_count;
unsigned timeout_num;
int save_buffer_mode;
int double_write_mode;
#endif
long used_4k_num;
unsigned char m_ins_flag;
char *provider_name;
union param_u param;
int frame_count;
int pic_count;
u32 stat;
struct timer_list timer;
u32 frame_dur;
u32 frame_ar;
int fatal_error;
uint8_t init_flag;
uint8_t first_sc_checked;
uint8_t process_busy;
#define PROC_STATE_INIT 0
#define PROC_STATE_DECODESLICE 1
#define PROC_STATE_SENDAGAIN 2
uint8_t process_state;
u32 ucode_pause_pos;
int show_frame_num;
struct buff_s mc_buf_spec;
struct dec_sysinfo vvp9_amstream_dec_info;
void *rpm_addr;
void *lmem_addr;
dma_addr_t rpm_phy_addr;
dma_addr_t lmem_phy_addr;
unsigned short *lmem_ptr;
unsigned short *debug_ptr;
void *prob_buffer_addr;
void *count_buffer_addr;
dma_addr_t prob_buffer_phy_addr;
dma_addr_t count_buffer_phy_addr;
void *frame_mmu_map_addr;
dma_addr_t frame_mmu_map_phy_addr;
#ifdef VP9_10B_MMU_DW
void *frame_mmu_dw_map_addr;
dma_addr_t frame_mmu_dw_map_phy_addr;
#endif
unsigned int use_cma_flag;
struct BUF_s m_BUF[MAX_BUF_NUM];
struct MVBUF_s m_mv_BUF[MV_BUFFER_NUM];
u32 used_buf_num;
DECLARE_KFIFO(newframe_q, struct vframe_s *, VF_POOL_SIZE);
DECLARE_KFIFO(display_q, struct vframe_s *, VF_POOL_SIZE);
DECLARE_KFIFO(pending_q, struct vframe_s *, VF_POOL_SIZE);
struct vframe_s vfpool[VF_POOL_SIZE];
u32 vf_pre_count;
u32 vf_get_count;
u32 vf_put_count;
int buf_num;
int pic_num;
int lcu_size_log2;
unsigned int losless_comp_body_size;
u32 video_signal_type;
int pts_mode;
int last_lookup_pts;
int last_pts;
u64 last_lookup_pts_us64;
u64 last_pts_us64;
u64 shift_byte_count;
u32 pts_unstable;
u32 frame_cnt_window;
u32 pts1, pts2;
u32 last_duration;
u32 duration_from_pts_done;
bool vp9_first_pts_ready;
u32 shift_byte_count_lo;
u32 shift_byte_count_hi;
int pts_mode_switching_count;
int pts_mode_recovery_count;
bool get_frame_dur;
u32 saved_resolution;
/**/
struct VP9_Common_s common;
struct RefCntBuffer_s *cur_buf;
int refresh_frame_flags;
uint8_t need_resync;
uint8_t hold_ref_buf;
uint8_t ready_for_new_data;
struct BufferPool_s vp9_buffer_pool;
struct BuffInfo_s *work_space_buf;
struct buff_s *mc_buf;
unsigned int frame_width;
unsigned int frame_height;
unsigned short *rpm_ptr;
int init_pic_w;
int init_pic_h;
int lcu_total;
int lcu_size;
int slice_type;
int skip_flag;
int decode_idx;
int slice_idx;
uint8_t has_keyframe;
uint8_t wait_buf;
uint8_t error_flag;
/* bit 0, for decoding; bit 1, for displaying */
uint8_t ignore_bufmgr_error;
int PB_skip_mode;
int PB_skip_count_after_decoding;
/*hw*/
/*lf*/
int default_filt_lvl;
struct loop_filter_info_n *lfi;
struct loopfilter *lf;
struct segmentation *seg_4lf;
/**/
struct vdec_info *gvs;
u32 pre_stream_offset;
unsigned int dec_status;
u32 last_put_idx;
int new_frame_displayed;
void *mmu_box;
void *bmmu_box;
int mmu_enable;
#ifdef VP9_10B_MMU_DW
void *mmu_box_dw;
int dw_mmu_enable;
#endif
struct vframe_master_display_colour_s vf_dp;
struct firmware_s *fw;
int max_pic_w;
int max_pic_h;
#ifdef SUPPORT_FB_DECODING
int dec_s1_result;
int s1_test_cmd;
struct work_struct s1_work;
int used_stage_buf_num;
int s1_pos;
int s2_pos;
void *stage_mmu_map_addr;
dma_addr_t stage_mmu_map_phy_addr;
struct stage_buf_s *s1_buf;
struct stage_buf_s *s2_buf;
struct stage_buf_s *stage_bufs
[STAGE_MAX_BUFFERS];
unsigned char run2_busy;
int s1_mv_buf_index;
int s1_mv_buf_index_pre;
int s1_mv_buf_index_pre_pre;
unsigned long s1_mpred_mv_wr_start_addr;
unsigned long s1_mpred_mv_wr_start_addr_pre;
unsigned short s1_intra_only;
unsigned short s1_frame_type;
unsigned short s1_width;
unsigned short s1_height;
unsigned short s1_last_show_frame;
union param_u s1_param;
u8 back_not_run_ready;
#endif
int need_cache_size;
u64 sc_start_time;
bool postproc_done;
int low_latency_flag;
bool no_head;
bool pic_list_init_done;
bool pic_list_init_done2;
bool is_used_v4l;
void *v4l2_ctx;
bool v4l_params_parsed;
int frameinfo_enable;
struct vframe_qos_s vframe_qos;
u32 mem_map_mode;
u32 dynamic_buf_num_margin;
struct vframe_s vframe_dummy;
u32 res_ch_flag;
/*struct VP9Decoder_s vp9_decoder;*/
union param_u vp9_param;
int sidebind_type;
int sidebind_channel_id;
bool enable_fence;
int fence_usage;
u32 frame_mode_pts_save[FRAME_BUFFERS];
u64 frame_mode_pts64_save[FRAME_BUFFERS];
int run_ready_min_buf_num;
int one_package_frame_cnt;
int buffer_wrap[FRAME_BUFFERS];
int last_width;
int last_height;
u32 error_frame_width;
u32 error_frame_height;
u32 endian;
ulong fb_token;
struct vp9_fence_vf_t fence_vf_s;
struct mutex fence_mutex;
dma_addr_t rdma_phy_adr;
unsigned *rdma_adr;
struct trace_decoder_name trace;
};
/*
 * Conditional debug print for one decoder instance.
 * Prints when @flag is 0 (unconditional) or when any bit of @flag is
 * set in the module-level 'debug' mask.  The message is prefixed with
 * the instance index "[n]" when @pbi is non-NULL.
 * Always returns 0.
 */
static int vp9_print(struct VP9Decoder_s *pbi,
int flag, const char *fmt, ...)
{
#define HEVC_PRINT_BUF 512
unsigned char buf[HEVC_PRINT_BUF];
int len = 0;
/* flag == 0 means "always print"; otherwise gate on the debug mask */
if (pbi == NULL ||
(flag == 0) ||
(debug & flag)) {
va_list args;
va_start(args, fmt);
if (pbi)
len = sprintf(buf, "[%d]", pbi->index);
/* bound the formatted tail so the prefix + message cannot overflow */
vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
pr_debug("%s", buf);
va_end(args);
}
return 0;
}
/*
 * Reject frame dimensions whose pixel count exceeds the per-SoC cap
 * (2K on T5D, 8K on SM1 and newer, 4K otherwise).  Non-positive sizes
 * are treated as oversize.  The "w > limit / h" form avoids the
 * overflow a direct w * h product could hit.
 */
static int is_oversize(int w, int h)
{
	int limit;

	if (get_cpu_major_id() == AM_MESON_CPU_MAJOR_ID_T5D)
		limit = MAX_SIZE_2K;
	else if (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1)
		limit = MAX_SIZE_8K;
	else
		limit = MAX_SIZE_4K;

	if (w <= 0 || h <= 0)
		return true;

	return (w > limit / h) ? true : false;
}
/*
 * Size of the compressed-frame header buffer for a w x h picture,
 * bucketed by resolution tier: 8K (SM1 and newer only), 4K, else 1080p.
 */
static int vvp9_mmu_compress_header_size(int w, int h)
{
	int sm1_or_later = (get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1);

	if (sm1_or_later && IS_8K_SIZE(w, h))
		return MMU_COMPRESS_HEADER_SIZE_8K;

	return IS_4K_SIZE(w, h) ?
		MMU_COMPRESS_HEADER_SIZE_4K :
		MMU_COMPRESS_HEADER_SIZE_1080P;
}
/*#define FRAME_MMU_MAP_SIZE (MAX_FRAME_4K_NUM * 4)*/
/*
 * Byte size of the MMU page-index map: 4 bytes per page, sized for 8K
 * frames on SM1+ when the stream's max picture size requires it,
 * otherwise for 4K frames.
 */
static int vvp9_frame_mmu_map_size(struct VP9Decoder_s *pbi)
{
	int max_pages = MAX_FRAME_4K_NUM;

	if ((get_cpu_major_id() >= AM_MESON_CPU_MAJOR_ID_SM1) &&
		IS_8K_SIZE(pbi->max_pic_w, pbi->max_pic_h))
		max_pages = MAX_FRAME_8K_NUM;

	return max_pages * 4;
}
static int v4l_alloc_and_config_pic(struct VP9Decoder_s *pbi,
struct PIC_BUFFER_CONFIG_s *pic);
/*
 * Record a new coded size in the common context.  On any change the
 * first-PTS/duration bookkeeping is restarted, since timing derived
 * from the old size no longer applies.
 */
static void resize_context_buffers(struct VP9Decoder_s *pbi,
	struct VP9_Common_s *cm, int width, int height)
{
	if (cm->width == width && cm->height == height)
		return;

	/* to do ..*/
	if (pbi != NULL) {
		pbi->vp9_first_pts_ready = 0;
		pbi->duration_from_pts_done = 0;
	}
	pr_info("%s (%d,%d)=>(%d,%d)\r\n", __func__,
		cm->width, cm->height,
		width, height);
	cm->width = width;
	cm->height = height;
	/*
	 * Upstream libvpx also re-allocated the mv buffer here when
	 * mi_rows/mi_cols grew (resize_mv_buffer); not ported.
	 */
}
/*
 * A reference frame is usable only when the current frame is at least
 * half and at most 16x the reference's dimensions (VP9 scaled-reference
 * limits).  Returns 1 when valid, 0 otherwise.
 */
static int valid_ref_frame_size(int ref_width, int ref_height,
	int this_width, int this_height)
{
	if (2 * this_width < ref_width || 2 * this_height < ref_height)
		return 0;
	if (this_width > 16 * ref_width || this_height > 16 * ref_height)
		return 0;
	return 1;
}
/*
*static int valid_ref_frame_img_fmt(enum vpx_bit_depth_t ref_bit_depth,
* int ref_xss, int ref_yss,
* enum vpx_bit_depth_t this_bit_depth,
* int this_xss, int this_yss) {
* return ref_bit_depth == this_bit_depth && ref_xss == this_xss &&
* ref_yss == this_yss;
*}
*/
/*
 * Apply the coded frame size parsed into @params: validate it, program
 * the parser hardware, allocate compressed-frame MMU pages for the new
 * frame buffer index, and record size/format into the buffer pool slot.
 * Returns 0 on success, -1 on invalid size or missing buffer, or the
 * negative result of a failed MMU allocation.
 */
static int setup_frame_size(
struct VP9Decoder_s *pbi,
struct VP9_Common_s *cm, union param_u *params,
unsigned int *mmu_index_adr,
unsigned int *mmu_dw_index_adr,
int print_header_info) {
int width, height;
struct BufferPool_s * const pool = cm->buffer_pool;
struct PIC_BUFFER_CONFIG_s *ybf;
int ret = 0;
width = params->p.width;
height = params->p.height;
if (is_oversize(width, height)) {
/* keep the offending size around for error reporting */
pbi->error_frame_width = width;
pbi->error_frame_height = height;
vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__);
return -1;
}
pbi->error_frame_width = 0;
pbi->error_frame_height = 0;
/*vp9_read_frame_size(rb, &width, &height);*/
if (print_header_info)
pr_info(" * 16-bits w read : %d (width : %d)\n", width, height);
if (print_header_info)
pr_info
(" * 16-bits h read : %d (height : %d)\n", width, height);
/* hand the coded size to the parser hardware */
WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width);
#ifdef VP9_10B_HED_FB
WRITE_VREG(HEVC_ASSIST_PIC_SIZE_FB_READ, (height << 16) | width);
#endif
/* dw mode 0x10 is double-write only: no compressed buffer needed */
if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) {
ret = vp9_alloc_mmu(pbi,
cm->new_fb_idx,
params->p.width,
params->p.height,
params->p.bit_depth,
mmu_index_adr);
if (ret != 0) {
pr_err("can't alloc need mmu1,idx %d ret =%d\n",
cm->new_fb_idx,
ret);
return ret;
}
cm->cur_fb_idx_mmu = cm->new_fb_idx;
}
#ifdef VP9_10B_MMU_DW
if (pbi->dw_mmu_enable && (mmu_dw_index_adr != NULL)) {
ret = vp9_alloc_mmu_dw(pbi, cm->new_fb_idx,
params->p.width, params->p.height,
params->p.bit_depth, mmu_dw_index_adr);
if (ret != 0) {
pr_err("can't alloc need mmu1 dw,idx %d ret =%d\n",
cm->new_fb_idx,
ret);
return ret;
}
}
#endif
resize_context_buffers(pbi, cm, width, height);
setup_display_size(cm, params, print_header_info);
#if 0
lock_buffer_pool(pool);
if (vp9_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_DEC_BORDER_IN_PIXELS,
cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer,
pool->get_fb_cb, pool->cb_priv)) {
unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
#else
/* porting: only the crop size and bit depth are tracked here */
ybf = get_frame_new_buffer(cm);
if (!ybf)
return -1;
ybf->y_crop_width = width;
ybf->y_crop_height = height;
ybf->bit_depth = params->p.bit_depth;
#endif
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth =
(unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
return ret;
}
/*
 * Like setup_frame_size(), but for inter frames whose size may be
 * inherited from a reference: when a same_frame_size bit is set, the
 * size comes from that reference's crop size, otherwise from the
 * header.  Also verifies at least one reference is within the VP9
 * scaling limits for the chosen size.
 * Returns 0 on success, -1 on invalid/oversize dimensions or invalid
 * references, or the negative result of a failed MMU allocation.
 */
static int setup_frame_size_with_refs(
struct VP9Decoder_s *pbi,
struct VP9_Common_s *cm,
union param_u *params,
unsigned int *mmu_index_adr,
unsigned int *mmu_dw_index_adr,
int print_header_info) {
int width, height;
int found = 0, i;
int has_valid_ref_frame = 0;
struct PIC_BUFFER_CONFIG_s *ybf;
struct BufferPool_s * const pool = cm->buffer_pool;
int ret = 0;
/* bits of same_frame_size select a reference to copy the size from */
for (i = 0; i < REFS_PER_FRAME; ++i) {
if ((params->p.same_frame_size >>
(REFS_PER_FRAME - i - 1)) & 0x1) {
struct PIC_BUFFER_CONFIG_s *const buf =
cm->frame_refs[i].buf;
/*if (print_header_info)
* pr_info
* ("1-bit same_frame_size[%d] read : 1\n", i);
*/
width = buf->y_crop_width;
height = buf->y_crop_height;
/*if (print_header_info)
* pr_info
* (" - same_frame_size width : %d\n", width);
*/
/*if (print_header_info)
* pr_info
* (" - same_frame_size height : %d\n", height);
*/
found = 1;
break;
} else {
/*if (print_header_info)
* pr_info
* ("1-bit same_frame_size[%d] read : 0\n", i);
*/
}
}
if (!found) {
/*vp9_read_frame_size(rb, &width, &height);*/
width = params->p.width;
height = params->p.height;
/*if (print_header_info)
* pr_info
* (" * 16-bits w read : %d (width : %d)\n",
* width, height);
*if (print_header_info)
* pr_info
* (" * 16-bits h read : %d (height : %d)\n",
* width, height);
*/
}
if (is_oversize(width, height)) {
/* keep the offending size around for error reporting */
pbi->error_frame_width = width;
pbi->error_frame_height = height;
vp9_print(pbi, 0, "%s, Error: Invalid frame size\n", __func__);
return -1;
}
pbi->error_frame_width = 0;
pbi->error_frame_height = 0;
/* write the resolved size back so later stages see it */
params->p.width = width;
params->p.height = height;
WRITE_VREG(HEVC_PARSER_PICTURE_SIZE, (height << 16) | width);
if (pbi->mmu_enable && ((pbi->double_write_mode & 0x10) == 0)) {
/*if(cm->prev_fb_idx >= 0) release_unused_4k(cm->prev_fb_idx);
*cm->prev_fb_idx = cm->new_fb_idx;
*/
/* pr_info
* ("[DEBUG DEBUG]Before alloc_mmu,
* prev_fb_idx : %d, new_fb_idx : %d\r\n",
* cm->prev_fb_idx, cm->new_fb_idx);
*/
ret = vp9_alloc_mmu(pbi, cm->new_fb_idx,
params->p.width, params->p.height,
params->p.bit_depth, mmu_index_adr);
if (ret != 0) {
pr_err("can't alloc need mmu,idx %d\r\n",
cm->new_fb_idx);
return ret;
}
cm->cur_fb_idx_mmu = cm->new_fb_idx;
}
#ifdef VP9_10B_MMU_DW
if (pbi->dw_mmu_enable && (mmu_dw_index_adr != NULL)) {
ret = vp9_alloc_mmu_dw(pbi, cm->new_fb_idx,
params->p.width, params->p.height,
params->p.bit_depth, mmu_dw_index_adr);
if (ret != 0) {
pr_err("can't alloc need mmu dw,idx %d\r\n",
cm->new_fb_idx);
return ret;
}
}
#endif
/*Check to make sure at least one of frames that this frame references
*has valid dimensions.
*/
for (i = 0; i < REFS_PER_FRAME; ++i) {
struct RefBuffer_s * const ref_frame = &cm->frame_refs[i];
has_valid_ref_frame |=
valid_ref_frame_size(ref_frame->buf->y_crop_width,
ref_frame->buf->y_crop_height,
width, height);
}
if (!has_valid_ref_frame) {
pr_err("Error: Referenced frame has invalid size\r\n");
return -1;
}
#if 0
/*
 * NOTE(review): dead code -- and the "return -1" below is NOT inside
 * the if body (no braces); if this block is ever re-enabled, it would
 * return unconditionally after the first iteration.  Fix before use.
 */
for (i = 0; i < REFS_PER_FRAME; ++i) {
struct RefBuffer_s * const ref_frame =
&cm->frame_refs[i];
if (!valid_ref_frame_img_fmt(
ref_frame->buf->bit_depth,
ref_frame->buf->subsampling_x,
ref_frame->buf->subsampling_y,
cm->bit_depth,
cm->subsampling_x,
cm->subsampling_y))
pr_err
("Referenced frame incompatible color fmt\r\n");
return -1;
}
#endif
resize_context_buffers(pbi, cm, width, height);
setup_display_size(cm, params, print_header_info);
#if 0
lock_buffer_pool(pool);
if (vp9_realloc_frame_buffer(
get_frame_new_buffer(cm), cm->width, cm->height,
cm->subsampling_x, cm->subsampling_y,
#if CONFIG_VP9_HIGHBITDEPTH
cm->use_highbitdepth,
#endif
VP9_DEC_BORDER_IN_PIXELS,
cm->byte_alignment,
&pool->frame_bufs[cm->new_fb_idx].raw_frame_buffer,
pool->get_fb_cb,
pool->cb_priv)) {
unlock_buffer_pool(pool);
vpx_internal_error(&cm->error, VPX_CODEC_MEM_ERROR,
"Failed to allocate frame buffer");
}
unlock_buffer_pool(pool);
#else
/* porting: only the crop size and bit depth are tracked here */
ybf = get_frame_new_buffer(cm);
if (!ybf)
return -1;
ybf->y_crop_width = width;
ybf->y_crop_height = height;
ybf->bit_depth = params->p.bit_depth;
#endif
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_x = cm->subsampling_x;
pool->frame_bufs[cm->new_fb_idx].buf.subsampling_y = cm->subsampling_y;
pool->frame_bufs[cm->new_fb_idx].buf.bit_depth =
(unsigned int)cm->bit_depth;
pool->frame_bufs[cm->new_fb_idx].buf.color_space = cm->color_space;
return ret;
}
/* True when a and b differ by strictly less than margin m. */
static inline bool close_to(int a, int b, int m)
{
	int delta = a - b;

	if (delta < 0)
		delta = -delta;
	return delta < m;
}
#ifdef MULTI_INSTANCE_SUPPORT
/*
 * Continuation print: same gating as vp9_print() but without the
 * "[index]" prefix, for appending to an already-started line.
 * Always returns 0.
 */
static int vp9_print_cont(struct VP9Decoder_s *pbi,
int flag, const char *fmt, ...)
{
unsigned char buf[HEVC_PRINT_BUF];
int len = 0;
/* flag == 0 means "always print"; otherwise gate on the debug mask */
if (pbi == NULL ||
(flag == 0) ||
(debug & flag)) {
va_list args;
va_start(args, fmt);
vsnprintf(buf + len, HEVC_PRINT_BUF - len, fmt, args);
pr_debug("%s", buf);
va_end(args);
}
return 0;
}
/*
 * Hand control back to the vdec scheduler.  In V4L2 mode, when params
 * come from ucode and the header has not been parsed yet, first ask the
 * v4l layer to sync the frame so the capture side can proceed.
 */
static void trigger_schedule(struct VP9Decoder_s *pbi)
{
if (pbi->is_used_v4l) {
struct aml_vcodec_ctx *ctx =
(struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
if (ctx->param_sets_from_ucode &&
!pbi->v4l_params_parsed)
vdec_v4l_write_frame_sync(ctx);
}
/* vdec_cb was registered by the core when this instance was run */
if (pbi->vdec_cb)
pbi->vdec_cb(hw_to_vdec(pbi), pbi->vdec_cb_arg);
}
static void reset_process_time(struct VP9Decoder_s *pbi)
{
if (pbi->start_process_time) {
unsigned process_time =
1000 * (jiffies - pbi->start_process_time) / HZ;
pbi->start_process_time = 0;
if (process_time > max_process_time[pbi->index])
max_process_time[pbi->index] = process_time;
}
}
/* Arm the per-frame decode watchdog bookkeeping. */
static void start_process_time(struct VP9Decoder_s *pbi)
{
	pbi->last_lcu_idx = 0;
	pbi->decode_timeout_count = 0;
	pbi->start_process_time = jiffies;
}
/*
 * Decode watchdog expiry: stop the HEVC core, count and log the
 * timeout, mark the frame as done and hand off to the work handler.
 */
static void timeout_process(struct VP9Decoder_s *pbi)
{
	pbi->timeout_num++;
	amhevc_stop();

	vp9_print(pbi, 0, "%s decoder timeout\n", __func__);

	pbi->dec_result = DEC_RESULT_DONE;
	reset_process_time(pbi);
	vdec_schedule_work(&pbi->work);
}
/*
 * Effective double-write mode: the module parameter overrides the
 * per-instance setting when its bit 31 is set.  Modes with bit 5
 * (0x20) combined with a low-nibble ratio of 2 or 3 (the 1:4 scalings)
 * are unsupported before T3 silicon and fall back to 0.
 */
static u32 get_valid_double_write_mode(struct VP9Decoder_s *pbi)
{
u32 dw = ((double_write_mode & 0x80000000) == 0) ?
pbi->double_write_mode :
(double_write_mode & 0x7fffffff);
if (dw & 0x20) {
if ((get_cpu_major_id() < AM_MESON_CPU_MAJOR_ID_T3)
&& ((dw & 0xf) == 2 || (dw & 0xf) == 3)) {
/* fixed log typo: "doueble" -> "double" */
pr_info("MMU double write 1:4 not supported !!!\n");
dw = 0;
}
}
return dw;
}
/*
 * Double-write mode for the frame currently being decoded.  V4L2 mode
 * defers to the v4l layer.  Modes >= 0x100 are resolution-adaptive:
 * the ratio is picked from the current frame's crop size
 * (0x1 = 1:1, 0x4 = 1:2, 0x2 = 1:4).
 */
static int get_double_write_mode(struct VP9Decoder_s *pbi)
{
u32 valid_dw_mode = get_valid_double_write_mode(pbi);
u32 dw;
int w, h;
struct VP9_Common_s *cm = &pbi->common;
struct PIC_BUFFER_CONFIG_s *cur_pic_config;
if (pbi->is_used_v4l) {
unsigned int out;
vdec_v4l_get_dw_mode(pbi->v4l2_ctx, &out);
dw = out;
return dw;
}
/* mask for supporting double write value bigger than 0x100 */
if (valid_dw_mode & 0xffffff00) {
if (!cm->cur_frame)
return 1;/*no valid frame,*/
cur_pic_config = &cm->cur_frame->buf;
w = cur_pic_config->y_crop_width;
h = cur_pic_config->y_crop_height;
dw = 0x1; /*1:1*/
switch (valid_dw_mode) {
case 0x100:
/* step down only above 1080p */
if (w > 1920 && h > 1088)
dw = 0x4; /*1:2*/
break;
case 0x200:
if (w > 1920 && h > 1088)
dw = 0x2; /*1:4*/
break;
case 0x300:
/* step down above 720p */
if (w > 1280 && h > 720)
dw = 0x4; /*1:2*/
break;
default:
break;
}
return dw;
}
return valid_dw_mode;
}
/*
 * Double-write mode used at buffer-allocation time, resolved against
 * the initial picture size (the current frame does not exist yet).
 * 0x100/0x200 step the ratio down only above 1080p, 0x300 above 720p;
 * any other configured value passes through unchanged.
 */
static int get_double_write_mode_init(struct VP9Decoder_s *pbi)
{
	u32 mode = get_valid_double_write_mode(pbi);
	int w = pbi->init_pic_w;
	int h = pbi->init_pic_h;
	int above_1080p = (w > 1920 && h > 1088);
	int above_720p = (w > 1280 && h > 720);

	switch (mode) {
	case 0x100:
		return above_1080p ? 0x4 : 0x1; /*1:2*/
	case 0x200:
		return above_1080p ? 0x2 : 0x1; /*1:4*/
	case 0x300:
		return above_720p ? 0x4 : 0x1; /*1:2*/
	default:
		return mode;
	}
}
#endif
//#define MAX_4K_NUM 0x1200
/* return page number */
/*
 * Number of 4K MMU pages needed for one compressed frame body of
 * w x h, or -1 when it exceeds what the page-index map can hold.
 */
static int vp9_mmu_page_num(struct VP9Decoder_s *pbi,
	int w, int h, int save_mode)
{
	int body_size = compute_losless_comp_body_size(w, h, save_mode);
	int pages = (body_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	int page_limit = vvp9_frame_mmu_map_size(pbi) >> 2;

	if (pages > page_limit) {
		pr_err("over max !! cur_mmu_4k_number 0x%x width %d height %d\n",
			pages, w, h);
		return -1;
	}
	return pages;
}
/* Map a v4l2 capture buffer to its internal compressed-buffer slot. */
static struct internal_comp_buf* v4lfb_to_icomp_buf(
	struct VP9Decoder_s *pbi,
	struct vdec_v4l2_buffer *fb)
{
	struct aml_vcodec_ctx *ctx = pbi->v4l2_ctx;
	struct aml_video_dec_buf *vb =
		container_of(fb, struct aml_video_dec_buf, frame_buffer);

	return &ctx->comp_bufs[vb->internal_index];
}
/*
 * Look up the internal compressed buffer bound to decoder buffer slot
 * @index via the v4l reference stored in m_BUF[].
 */
static struct internal_comp_buf* index_to_icomp_buf(
	struct VP9Decoder_s *pbi, int index)
{
	struct aml_vcodec_ctx *ctx = pbi->v4l2_ctx;
	struct vdec_v4l2_buffer *fb = (struct vdec_v4l2_buffer *)
		pbi->m_BUF[index].v4l_ref_buf_addr;
	struct aml_video_dec_buf *vb =
		container_of(fb, struct aml_video_dec_buf, frame_buffer);

	return &ctx->comp_bufs[vb->internal_index];
}
/*
 * Allocate the 4K MMU pages backing the compressed body of frame
 * @cur_buf_idx and fill @mmu_index_adr with the page indexes.
 * In V4L2 mode the pages come from the capture buffer's own comp-buf
 * mmu box; otherwise from the decoder-wide mmu box.
 * Returns 0 on success (or immediately when dw mode 0x10 needs no
 * compressed buffer), negative on failure; bit depth >= 12 is treated
 * as a fatal size overflow.
 */
int vp9_alloc_mmu(
struct VP9Decoder_s *pbi,
int cur_buf_idx,
int pic_width,
int pic_height,
unsigned short bit_depth,
unsigned int *mmu_index_adr)
{
int ret;
int bit_depth_10 = (bit_depth == VPX_BITS_10);
int cur_mmu_4k_number;
if (get_double_write_mode(pbi) == 0x10)
return 0;
if (bit_depth >= VPX_BITS_12) {
pbi->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW;
pr_err("fatal_error, un support bit depth 12!\n\n");
return -1;
}
cur_mmu_4k_number = vp9_mmu_page_num(pbi,
pic_width,
pic_height,
bit_depth_10);
if (cur_mmu_4k_number < 0)
return -1;
if (pbi->is_used_v4l) {
struct internal_comp_buf *ibuf =
index_to_icomp_buf(pbi, cur_buf_idx);
ret = decoder_mmu_box_alloc_idx(
ibuf->mmu_box,
ibuf->index,
ibuf->frame_buffer_size,
mmu_index_adr);
} else {
/* trace how long header-memory allocation takes */
ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_START);
ret = decoder_mmu_box_alloc_idx(
pbi->mmu_box,
cur_buf_idx,
cur_mmu_4k_number,
mmu_index_adr);
ATRACE_COUNTER(pbi->trace.decode_header_memory_time_name, TRACE_HEADER_MEMORY_END);
}
return ret;
}
#ifdef VP9_10B_MMU_DW
/*
 * Allocate the 4K MMU pages for the double-write copy of frame
 * @cur_buf_idx from the dedicated dw mmu box.  Not supported in V4L2
 * mode (returns -1).  Mirrors vp9_alloc_mmu() otherwise: 0 on success
 * or when dw mode 0x10 needs no compressed buffer, negative on
 * failure, bit depth >= 12 fatal.
 */
int vp9_alloc_mmu_dw(
struct VP9Decoder_s *pbi,
int cur_buf_idx,
int pic_width,
int pic_height,
unsigned short bit_depth,
unsigned int *mmu_index_adr)
{
int ret;
int bit_depth_10 = (bit_depth == VPX_BITS_10);
int cur_mmu_4k_number;
if (pbi->is_used_v4l)
return -1;
if (get_double_write_mode(pbi) == 0x10)
return 0;
if (bit_depth >= VPX_BITS_12) {
pbi->fatal_error = DECODER_FATAL_ERROR_SIZE_OVERFLOW;
pr_err("fatal_error, un support bit depth 12!\n\n");
return -1;
}
cur_mmu_4k_number = vp9_mmu_page_num(pbi,
pic_width,
pic_height,
bit_depth_10);
if (cur_mmu_4k_number < 0)
return -1;
ret = decoder_mmu_box_alloc_idx(
pbi->mmu_box_dw,
cur_buf_idx,
cur_mmu_4k_number,
mmu_index_adr);
return ret;
}
#endif
#ifndef MV_USE_FIXED_BUF
/* Release every allocated motion-vector buffer back to the bmmu box. */
static void dealloc_mv_bufs(struct VP9Decoder_s *pbi)
{
	int i;

	for (i = 0; i < MV_BUFFER_NUM; i++) {
		if (!pbi->m_mv_BUF[i].start_adr)
			continue;
		if (debug)
			pr_info(
			"dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n",
			i, pbi->m_mv_BUF[i].start_adr,
			pbi->m_mv_BUF[i].size,
			pbi->m_mv_BUF[i].used_flag);
		decoder_bmmu_box_free_idx(
			pbi->bmmu_box,
			MV_BUFFER_IDX(i));
		pbi->m_mv_BUF[i].start_adr = 0;
		pbi->m_mv_BUF[i].size = 0;
		pbi->m_mv_BUF[i].used_flag = 0;
	}
}
/*
 * Ensure MV buffer slot @i is backed by at least @size bytes from the
 * bmmu box.  An existing, large-enough buffer is kept as-is; if the
 * existing buffer is too small, ALL mv buffers are released first so
 * the pool can be rebuilt at the larger size.
 * Returns 0 on success, -1 on allocation failure.
 */
static int alloc_mv_buf(struct VP9Decoder_s *pbi,
int i, int size)
{
int ret = 0;
if (pbi->m_mv_BUF[i].start_adr &&
size > pbi->m_mv_BUF[i].size) {
/* too small: drop the whole pool, then fall through to alloc */
dealloc_mv_bufs(pbi);
} else if (pbi->m_mv_BUF[i].start_adr)
return 0;
if (decoder_bmmu_box_alloc_buf_phy
(pbi->bmmu_box,
MV_BUFFER_IDX(i), size,
DRIVER_NAME,
&pbi->m_mv_BUF[i].start_adr) < 0) {
pbi->m_mv_BUF[i].start_adr = 0;
ret = -1;
} else {
pbi->m_mv_BUF[i].size = size;
pbi->m_mv_BUF[i].used_flag = 0;
ret = 0;
if (debug) {
pr_info(
"MV Buffer %d: start_adr %px size %x\n",
i,
(void *)pbi->m_mv_BUF[i].start_adr,
pbi->m_mv_BUF[i].size);
}
}
return ret;
}
/*
 * Motion-vector buffer size for a pic_width x pic_height frame:
 * per-LCU colocated-MV storage (36 * 16 bytes each) plus a per-column
 * strip, rounded up to a 64KB boundary.  @pbi is currently unused but
 * kept for signature parity with the other sizing helpers.
 */
static int cal_mv_buf_size(struct VP9Decoder_s *pbi, int pic_width, int pic_height)
{
	const int lcu_size = 64; /*fixed 64*/
	int w_aligned = (pic_width + 63) & ~0x3f;
	int h_aligned = (pic_height + 31) & ~0x1f;
	int lcu_cols = (w_aligned + lcu_size - 1) / lcu_size;
	int lcu_rows = (h_aligned + lcu_size - 1) / lcu_size;
	int lcu_total = lcu_cols * lcu_rows;
	int colocated = lcu_total * 36 * 16;
	int strip = lcu_cols * 16 *
		((lcu_rows >> 3) + (lcu_rows & 0x7));

	/* round the sum up to the next 64KB */
	return (colocated + strip + 0xffff) & ~0xffff;
}
/*
 * Pre-allocate the MV buffer pool for the initial picture size.
 * Skipped entirely in dynamic-alloc mode (buffers are created per
 * frame in get_mv_buf()).  Pool depth is REF_FRAMES plus the
 * configured margin, switched to the 4K variant above 2048x1088.
 * Returns 0 on success, -1 when any allocation fails.
 */
static int init_mv_buf_list(struct VP9Decoder_s *pbi)
{
int i;
int ret = 0;
int count = MV_BUFFER_NUM;
int pic_width = pbi->init_pic_w;
int pic_height = pbi->init_pic_h;
int size = cal_mv_buf_size(pbi, pic_width, pic_height);
if (mv_buf_dynamic_alloc)
return 0;
if (mv_buf_margin > 0)
count = REF_FRAMES + mv_buf_margin;
if (pbi->init_pic_w > 2048 && pbi->init_pic_h > 1088)
count = REF_FRAMES_4K + mv_buf_margin;
if (debug) {
pr_info("%s w:%d, h:%d, count: %d\n",
__func__, pbi->init_pic_w, pbi->init_pic_h, count);
}
/* never exceed the fixed pool capacity */
for (i = 0;
i < count && i < MV_BUFFER_NUM; i++) {
if (alloc_mv_buf(pbi, i, size) < 0) {
ret = -1;
break;
}
}
return ret;
}
/*
 * Attach an MV buffer to @pic_config.  In dynamic-alloc mode a fresh
 * buffer sized for the current header dimensions is allocated into the
 * first empty slot; otherwise the first unused pre-allocated buffer is
 * claimed.  The write address handed to hardware is the start address
 * rounded up to a 64KB boundary.
 * Returns the slot index on success, -1 when no buffer is available.
 */
static int get_mv_buf(struct VP9Decoder_s *pbi,
struct PIC_BUFFER_CONFIG_s *pic_config)
{
int i;
int ret = -1;
if (mv_buf_dynamic_alloc) {
union param_u *params = &pbi->vp9_param;
int size = cal_mv_buf_size(pbi,
params->p.width, params->p.height);
/* find an empty slot to allocate into */
for (i = 0; i < MV_BUFFER_NUM; i++) {
if (pbi->m_mv_BUF[i].start_adr == 0) {
ret = i;
break;
}
}
if (i == MV_BUFFER_NUM) {
pr_info(
"%s: Error, mv buf MV_BUFFER_NUM is not enough\n",
__func__);
return ret;
}
if (alloc_mv_buf(pbi, ret, size) >= 0) {
pic_config->mv_buf_index = ret;
pic_config->mpred_mv_wr_start_addr =
(pbi->m_mv_BUF[ret].start_adr + 0xffff) &
(~0xffff);
pic_config->mv_size = size;
if (debug & VP9_DEBUG_BUFMGR_MORE)
pr_info(
"%s alloc => %d (%ld) size 0x%x\n",
__func__, ret,
pic_config->mpred_mv_wr_start_addr,
pic_config->mv_size);
} else {
pr_info(
"%s: Error, mv buf alloc fail\n",
__func__);
}
return ret;
}
/* pooled mode: claim the first allocated-but-unused buffer */
for (i = 0; i < MV_BUFFER_NUM; i++) {
if (pbi->m_mv_BUF[i].start_adr &&
pbi->m_mv_BUF[i].used_flag == 0) {
pbi->m_mv_BUF[i].used_flag = 1;
ret = i;
break;
}
}
if (ret >= 0) {
pic_config->mv_buf_index = ret;
pic_config->mpred_mv_wr_start_addr =
(pbi->m_mv_BUF[ret].start_adr + 0xffff) &
(~0xffff);
pic_config->mv_size = pbi->m_mv_BUF[ret].size;
if (debug & VP9_DEBUG_BUFMGR_MORE)
pr_info(
"%s => %d (%lx) size 0x%x\n",
__func__, ret,
pic_config->mpred_mv_wr_start_addr,
pic_config->mv_size);
} else {
pr_info(
"%s: Error, mv buf is not enough\n",
__func__);
}
return ret;
}
/*
 * Return a motion-vector buffer to the pool (or free it outright in
 * dynamic-alloc mode) and invalidate the caller's index.
 *
 * @mv_buf_index: in/out; set to -1 once the buffer has been released.
 */
static void put_mv_buf(struct VP9Decoder_s *pbi,
int *mv_buf_index)
{
int i = *mv_buf_index;
/*
 * Reject out-of-range slots.  The original check covered only the
 * upper bound; a stale index (e.g. -1, already released) would have
 * indexed m_mv_BUF[] out of bounds below.
 */
if (i < 0 || i >= MV_BUFFER_NUM) {
if (debug & VP9_DEBUG_BUFMGR_MORE)
pr_info(
"%s: index %d beyond range\n",
__func__, i);
return;
}
if (mv_buf_dynamic_alloc) {
/* dynamic mode: give the pages back to the bmmu box entirely */
if (pbi->m_mv_BUF[i].start_adr) {
if (debug)
pr_info(
"dealloc mv buf(%d) adr %ld size 0x%x used_flag %d\n",
i, pbi->m_mv_BUF[i].start_adr,
pbi->m_mv_BUF[i].size,
pbi->m_mv_BUF[i].used_flag);
decoder_bmmu_box_free_idx(
pbi->bmmu_box,
MV_BUFFER_IDX(i));
pbi->m_mv_BUF[i].start_adr = 0;
pbi->m_mv_BUF[i].size = 0;
pbi->m_mv_BUF[i].used_flag = 0;
}
*mv_buf_index = -1;
return;
}
if (debug & VP9_DEBUG_BUFMGR_MORE)
pr_info(
"%s(%d): used_flag(%d)\n",
__func__, i,
pbi->m_mv_BUF[i].used_flag);
*mv_buf_index = -1;
/* pooled mode: just mark the slot free for reuse */
if (pbi->m_mv_BUF[i].start_adr &&
pbi->m_mv_BUF[i].used_flag)
pbi->m_mv_BUF[i].used_flag = 0;
}
/*
 * Release the MV buffer of every frame slot no longer in use: any slot
 * that is not the previous frame, has a valid buffer index and still
 * holds an MV buffer.
 */
static void put_un_used_mv_bufs(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
for (i = 0; i < pbi->used_buf_num; ++i) {
#if 0
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.index != -1) &&
(frame_bufs[i].buf.mv_buf_index >= 0)
)
#else
/* active criterion: keep only the previous frame's MV buffer */
if ((&frame_bufs[i] != cm->prev_frame) &&
(frame_bufs[i].buf.index != -1) &&
(frame_bufs[i].buf.mv_buf_index >= 0)
)
#endif
put_mv_buf(pbi, &frame_bufs[i].buf.mv_buf_index);
}
}
#ifdef SUPPORT_FB_DECODING
/* True when at least one allocated MV buffer is currently unused. */
static bool mv_buf_available(struct VP9Decoder_s *pbi)
{
	int i;

	for (i = 0; i < MV_BUFFER_NUM; i++) {
		if (pbi->m_mv_BUF[i].start_adr &&
			pbi->m_mv_BUF[i].used_flag == 0)
			return 1;
	}
	return 0;
}
#endif
#endif
#ifdef SUPPORT_FB_DECODING
/*
 * Allocate the two-stage (front/back) decoding ring buffers, up to
 * min(STAGE_MAX_BUFFERS, stage_buf_num).  A vmalloc failure stops the
 * loop early and the ring size is whatever was allocated so far.
 * Also resets the ring positions and the staged MV-buffer indexes.
 */
static void init_stage_buf(struct VP9Decoder_s *pbi)
{
uint i;
for (i = 0; i < STAGE_MAX_BUFFERS
&& i < stage_buf_num; i++) {
pbi->stage_bufs[i] =
vmalloc(sizeof(struct stage_buf_s));
if (pbi->stage_bufs[i] == NULL) {
vp9_print(pbi,
0, "%s vmalloc fail\n", __func__);
break;
}
pbi->stage_bufs[i]->index = i;
}
/* ring holds however many buffers were actually allocated */
pbi->used_stage_buf_num = i;
pbi->s1_pos = 0;
pbi->s2_pos = 0;
pbi->s1_buf = NULL;
pbi->s2_buf = NULL;
/* FRAME_BUFFERS acts as the "no MV buffer yet" sentinel */
pbi->s1_mv_buf_index = FRAME_BUFFERS;
pbi->s1_mv_buf_index_pre = FRAME_BUFFERS;
pbi->s1_mv_buf_index_pre_pre = FRAME_BUFFERS;
if (pbi->used_stage_buf_num > 0)
vp9_print(pbi,
0, "%s 2 stage decoding buf %d\n",
__func__,
pbi->used_stage_buf_num);
}
/* Free every stage buffer from init_stage_buf() and reset ring state. */
static void uninit_stage_buf(struct VP9Decoder_s *pbi)
{
	int i;

	for (i = 0; i < pbi->used_stage_buf_num; i++) {
		if (pbi->stage_bufs[i] != NULL)
			vfree(pbi->stage_bufs[i]);
		pbi->stage_bufs[i] = NULL;
	}
	pbi->used_stage_buf_num = 0;
	pbi->s1_pos = 0;
	pbi->s2_pos = 0;
	pbi->s1_buf = NULL;
	pbi->s2_buf = NULL;
}
/*
 * Reserve the next stage (front-end) buffer at s1_pos and back it with
 * MAX_STAGE_PAGE_NUM MMU pages.  Fails (returns < 0, s1_buf = NULL)
 * when advancing s1 would collide with s2, i.e. the ring is full.
 *
 * NOTE(review): pages are allocated here under mmu box index
 * buf->index, while inc_s1_pos()/inc_s2_pos() free index
 * FRAME_BUFFERS + buf->index -- verify the intended index base; this
 * looks inconsistent.
 */
static int get_s1_buf(
struct VP9Decoder_s *pbi)
{
struct stage_buf_s *buf = NULL;
int ret = -1;
int buf_page_num = MAX_STAGE_PAGE_NUM;
int next_s1_pos = pbi->s1_pos + 1;
if (next_s1_pos >= pbi->used_stage_buf_num)
next_s1_pos = 0;
/* ring full: one slot is always left open between s1 and s2 */
if (next_s1_pos == pbi->s2_pos) {
pbi->s1_buf = NULL;
return ret;
}
buf = pbi->stage_bufs[pbi->s1_pos];
ret = decoder_mmu_box_alloc_idx(
pbi->mmu_box,
buf->index,
buf_page_num,
pbi->stage_mmu_map_addr);
if (ret < 0) {
vp9_print(pbi, 0,
"%s decoder_mmu_box_alloc fail for index %d (s1_pos %d s2_pos %d)\n",
__func__, buf->index,
pbi->s1_pos, pbi->s2_pos);
buf = NULL;
} else {
vp9_print(pbi, VP9_DEBUG_2_STAGE,
"%s decoder_mmu_box_alloc %d page for index %d (s1_pos %d s2_pos %d)\n",
__func__, buf_page_num, buf->index,
pbi->s1_pos, pbi->s2_pos);
}
pbi->s1_buf = buf;
return ret;
}
/*
 * Commit the stage buffer at s1_pos after the front-end pass: trim its
 * MMU allocation down to the pages actually consumed (read from the
 * hardware write counter, or half the maximum in test-schedule builds)
 * and advance the ring write position.
 */
static void inc_s1_pos(struct VP9Decoder_s *pbi)
{
struct stage_buf_s *buf =
pbi->stage_bufs[pbi->s1_pos];
int used_page_num =
#ifdef FB_DECODING_TEST_SCHEDULE
MAX_STAGE_PAGE_NUM/2;
#else
(READ_VREG(HEVC_ASSIST_HED_FB_W_CTL) >> 16);
#endif
/* free the unused tail pages beyond what the HW actually wrote */
decoder_mmu_box_free_idx_tail(pbi->mmu_box,
FRAME_BUFFERS + buf->index, used_page_num);
pbi->s1_pos++;
if (pbi->s1_pos >= pbi->used_stage_buf_num)
pbi->s1_pos = 0;
vp9_print(pbi, VP9_DEBUG_2_STAGE,
"%s (used_page_num %d) for index %d (s1_pos %d s2_pos %d)\n",
__func__, used_page_num, buf->index,
pbi->s1_pos, pbi->s2_pos);
}
/* True while the s2 (back-end) position trails s1: work is pending. */
#define s2_buf_available(pbi) (pbi->s1_pos != pbi->s2_pos)
/*
 * Take the stage buffer at s2_pos for back-end decoding, if any is
 * pending.  Returns 0 and sets pbi->s2_buf on success, -1 when the
 * ring is empty.
 */
static int get_s2_buf(
struct VP9Decoder_s *pbi)
{
int ret = -1;
struct stage_buf_s *buf = NULL;
if (s2_buf_available(pbi)) {
buf = pbi->stage_bufs[pbi->s2_pos];
vp9_print(pbi, VP9_DEBUG_2_STAGE,
"%s for index %d (s1_pos %d s2_pos %d)\n",
__func__, buf->index,
pbi->s1_pos, pbi->s2_pos);
pbi->s2_buf = buf;
ret = 0;
}
return ret;
}
/*
 * Consume the stage buffer at s2_pos: release its MMU pages and
 * advance the ring read position.
 */
static void inc_s2_pos(struct VP9Decoder_s *pbi)
{
	struct stage_buf_s *done = pbi->stage_bufs[pbi->s2_pos];

	decoder_mmu_box_free_idx(pbi->mmu_box,
		FRAME_BUFFERS + done->index);
	if (++pbi->s2_pos >= pbi->used_stage_buf_num)
		pbi->s2_pos = 0;
	vp9_print(pbi, VP9_DEBUG_2_STAGE,
		"%s for index %d (s1_pos %d s2_pos %d)\n",
		__func__, done->index,
		pbi->s1_pos, pbi->s2_pos);
}
/*
 * Free slots remaining in the stage ring.  One slot is always kept
 * empty so a full ring can be told apart from an empty one.
 */
static int get_free_stage_buf_num(struct VP9Decoder_s *pbi)
{
	int in_flight;

	if (pbi->s1_pos >= pbi->s2_pos)
		in_flight = pbi->s1_pos - pbi->s2_pos;
	else
		in_flight = pbi->s1_pos - pbi->s2_pos +
			pbi->used_stage_buf_num;
	return pbi->used_stage_buf_num - in_flight - 1;
}
#ifndef FB_DECODING_TEST_SCHEDULE
static DEFINE_SPINLOCK(fb_core_spin_lock);
/*
 * Whether the back-end (s2) pass finished its last LCU.
 * Stub pending VLSI review -- currently always reports finished.
 */
static u8 is_s2_decoding_finished(struct VP9Decoder_s *pbi)
{
/* to do: VLSI review
completion of last LCU decoding in BACK
*/
return 1;
}
/*
 * Kick the back-end LCU decode after the front-end parse.
 * Stub pending VLSI review -- intentionally empty for now.
 */
static void start_s1_decoding(struct VP9Decoder_s *pbi)
{
/* to do: VLSI review
after parser, how to start LCU decoding in BACK
*/
}
/*
 * Reset the front (parser) and/or back (pixel pipeline) halves of the
 * HEVC core, selected by @mask.  Sequence: quiesce the stream control,
 * disconnect the core from the DMC bus, pulse the per-unit reset bits
 * in DOS_SW_RESET3, then reconnect the DMC.
 *
 * NOTE(review): the DMC_CHAN_STS poll below has no timeout; if the
 * channel never idles this spins forever.  Confirm that is acceptable
 * in this reset path.
 */
static void fb_reset_core(struct vdec_s *vdec, u32 mask)
{
/* to do: VLSI review
1. how to disconnect DMC for FRONT and BACK
2. reset bit 13, 24, FRONT or BACK ??
*/
unsigned long flags;
u32 reset_bits = 0;
if (mask & HW_MASK_FRONT)
WRITE_VREG(HEVC_STREAM_CONTROL, 0);
/* request DMC disconnect for the HEVC port (bit 4) ... */
spin_lock_irqsave(&fb_core_spin_lock, flags);
codec_dmcbus_write(DMC_REQ_CTRL,
codec_dmcbus_read(DMC_REQ_CTRL) & (~(1 << 4)));
spin_unlock_irqrestore(&fb_core_spin_lock, flags);
/* ... and busy-wait until the channel reports idle */
while (!(codec_dmcbus_read(DMC_CHAN_STS)
& (1 << 4)))
;
if ((mask & HW_MASK_FRONT) &&
input_frame_based(vdec))
WRITE_VREG(HEVC_STREAM_CONTROL, 0);
/*
* 2: assist
* 3: parser
* 4: parser_state
* 8: dblk
* 11:mcpu
* 12:ccpu
* 13:ddr
* 14:iqit
* 15:ipp
* 17:qdct
* 18:mpred
* 19:sao
* 24:hevc_afifo
*/
if (mask & HW_MASK_FRONT) {
reset_bits =
(1<<3)|(1<<4)|(1<<11)|
(1<<12)|(1<<18);
}
if (mask & HW_MASK_BACK) {
reset_bits =
(1<<8)|(1<<13)|(1<<14)|(1<<15)|
(1<<17)|(1<<19)|(1<<24);
}
WRITE_VREG(DOS_SW_RESET3, reset_bits);
#if 0
(1<<3)|(1<<4)|(1<<8)|(1<<11)|
(1<<12)|(1<<13)|(1<<14)|(1<<15)|
(1<<17)|(1<<18)|(1<<19)|(1<<24);
#endif
WRITE_VREG(DOS_SW_RESET3, 0);
/* reconnect the core to the DMC bus */
spin_lock_irqsave(&fb_core_spin_lock, flags);
codec_dmcbus_write(DMC_REQ_CTRL,
codec_dmcbus_read(DMC_REQ_CTRL) | (1 << 4));
spin_unlock_irqrestore(&fb_core_spin_lock, flags);
}
#endif
#endif
static void init_pic_list_hw(struct VP9Decoder_s *pbi);
/*
 * Claim a free frame-buffer slot under the pool lock: one with no
 * decoder references, no vframe (display) references, a valid index,
 * and not the frame currently being decoded.  The winner's ref_count
 * is set to 1.  Returns the slot index, or INVALID_IDX if none free.
 */
static int get_free_fb(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
int i;
unsigned long flags;
lock_buffer_pool(cm->buffer_pool, flags);
if (debug & VP9_DEBUG_BUFMGR_MORE) {
for (i = 0; i < pbi->used_buf_num; ++i) {
pr_info("%s:%d, ref_count %d vf_ref %d index %d\r\n",
__func__, i, frame_bufs[i].ref_count,
frame_bufs[i].buf.vf_ref,
frame_bufs[i].buf.index);
}
}
for (i = 0; i < pbi->used_buf_num; ++i) {
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.vf_ref == 0) &&
(frame_bufs[i].buf.index != -1) &&
(cm->cur_frame != &frame_bufs[i])
)
break;
}
if (i != pbi->used_buf_num) {
frame_bufs[i].ref_count = 1;
/*pr_info("[MMU DEBUG 1] set ref_count[%d] : %d\r\n",
i, frame_bufs[i].ref_count);*/
} else {
/* Reset i to be INVALID_IDX to indicate
no free buffer found*/
i = INVALID_IDX;
}
unlock_buffer_pool(cm->buffer_pool, flags);
return i;
}
/*
 * Propagate the current chunk's timestamp to every not-yet-shown
 * (hidden) frame with a valid buffer and no display reference, so a
 * later show_existing_frame carries a sane PTS.
 */
static void update_hide_frame_timestamp(struct VP9Decoder_s *pbi)
{
struct RefCntBuffer_s *const frame_bufs =
pbi->common.buffer_pool->frame_bufs;
int i;
for (i = 0; i < pbi->used_buf_num; ++i) {
if ((!frame_bufs[i].show_frame) &&
(!frame_bufs[i].buf.vf_ref) &&
(frame_bufs[i].buf.BUF_index != -1)) {
frame_bufs[i].buf.timestamp = pbi->chunk->timestamp;
vp9_print(pbi, VP9_DEBUG_OUT_PTS,
"%s, update %d hide frame ts: %lld\n",
__func__, i, frame_bufs[i].buf.timestamp);
}
}
}
/*
 * First frame-buffer slot with neither decoder nor display references.
 * NOTE(review): returns pbi->used_buf_num when nothing is free; the
 * caller (v4l_get_free_fb) indexes frame_bufs[] with the result
 * without checking -- confirm a free slot is guaranteed on that path.
 */
static int get_free_fb_idx(struct VP9Decoder_s *pbi)
{
int i;
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
for (i = 0; i < pbi->used_buf_num; ++i) {
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.vf_ref == 0))
break;
}
return i;
}
/*
 * V4L2 path of get_free_fb(): walk the capture-buffer pool and pick a
 * picture to decode into.  A buffer already owned by the decoder
 * (IN_DEC) is reused when fully unreferenced; a buffer still queued in
 * m2m is bound to a free slot, allocated/configured and the per-pic
 * canvas/hw tables are refreshed.  The chosen frame inherits the
 * current chunk timestamp and its v4l fb is marked FB_ST_DECODER.
 * Returns the picture index, or INVALID_IDX when nothing is available.
 */
static int v4l_get_free_fb(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
struct aml_vcodec_ctx * v4l = pbi->v4l2_ctx;
struct v4l_buff_pool *pool = &v4l->cap_pool;
struct PIC_BUFFER_CONFIG_s *pic = NULL;
struct PIC_BUFFER_CONFIG_s *free_pic = NULL;
ulong flags;
int idx, i;
lock_buffer_pool(cm->buffer_pool, flags);
for (i = 0; i < pool->in; ++i) {
/* each pool entry packs (state << 16) | capture index */
u32 state = (pool->seq[i] >> 16);
u32 index = (pool->seq[i] & 0xffff);
switch (state) {
case V4L_CAP_BUFF_IN_DEC:
pic = &frame_bufs[i].buf;
if ((frame_bufs[i].ref_count == 0) &&
(pic->vf_ref == 0) &&
(pic->index != -1) &&
pic->cma_alloc_addr) {
free_pic = pic;
}
break;
case V4L_CAP_BUFF_IN_M2M:
/* bind this m2m capture buffer to a free decoder slot */
idx = get_free_fb_idx(pbi);
pic = &frame_bufs[idx].buf;
pic->y_crop_width = pbi->frame_width;
pic->y_crop_height = pbi->frame_height;
pbi->buffer_wrap[idx] = index;
if (!v4l_alloc_and_config_pic(pbi, pic)) {
set_canvas(pbi, pic);
init_pic_list_hw(pbi);
free_pic = pic;
}
break;
default:
break;
}
if (free_pic) {
frame_bufs[i].ref_count = 1;
break;
}
}
if (free_pic && pbi->chunk) {
free_pic->timestamp = pbi->chunk->timestamp;
update_hide_frame_timestamp(pbi);
}
unlock_buffer_pool(cm->buffer_pool, flags);
if (free_pic) {
struct vdec_v4l2_buffer *fb =
(struct vdec_v4l2_buffer *)
pbi->m_BUF[i].v4l_ref_buf_addr;
fb->status = FB_ST_DECODER;
}
if (debug & VP9_DEBUG_OUT_PTS) {
if (free_pic) {
pr_debug("%s, idx: %d, ts: %lld\n",
__func__, free_pic->index, free_pic->timestamp);
} else {
pr_debug("%s, vp9 get free pic null\n", __func__);
}
}
return free_pic ? free_pic->index : INVALID_IDX;
}
/*
 * Count frame buffers currently available for decoding.  In V4L2 mode
 * that is unreferenced CMA-backed buffers plus any capture buffers
 * still queued on the m2m side; before the header is parsed the count
 * is forced to the run-ready minimum so parsing can be triggered.
 */
static int get_free_buf_count(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
struct aml_vcodec_ctx *ctx = (struct aml_vcodec_ctx *)(pbi->v4l2_ctx);
int i, free_buf_count = 0;
if (pbi->is_used_v4l) {
for (i = 0; i < pbi->used_buf_num; ++i) {
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.vf_ref == 0) &&
frame_bufs[i].buf.cma_alloc_addr) {
free_buf_count++;
}
}
/* buffers not yet handed to the decoder still count */
if (ctx->cap_pool.dec < pbi->used_buf_num) {
free_buf_count +=
v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx);
}
/* trigger to parse head data. */
if (!pbi->v4l_params_parsed) {
free_buf_count = pbi->run_ready_min_buf_num;
}
} else {
for (i = 0; i < pbi->used_buf_num; ++i) {
if ((frame_bufs[i].ref_count == 0) &&
(frame_bufs[i].buf.vf_ref == 0) &&
(frame_bufs[i].buf.index != -1))
free_buf_count++;
}
}
return free_buf_count;
}
/*
 * Drop one reference from frame slot @idx (no-op for idx < 0).  When
 * the count reaches zero and the slot actually acquired a raw frame
 * buffer, release it back to its pool.
 */
static void decrease_ref_count(int idx, struct RefCntBuffer_s *const frame_bufs,
struct BufferPool_s *const pool)
{
if (idx >= 0) {
--frame_bufs[idx].ref_count;
/*pr_info("[MMU DEBUG 7] dec ref_count[%d] : %d\r\n", idx,
* frame_bufs[idx].ref_count);
*/
/*A worker may only get a free framebuffer index when
*calling get_free_fb. But the private buffer is not set up
*until finish decoding header. So any error happens during
*decoding header, the frame_bufs will not have valid priv
*buffer.
*/
if (frame_bufs[idx].ref_count == 0 &&
frame_bufs[idx].raw_frame_buffer.priv)
vp9_release_frame_buffer
(&frame_bufs[idx].raw_frame_buffer);
}
}
/*
 * Build cm->next_ref_frame_map from refresh_frame_flags under the pool
 * lock: slots being refreshed point at the new frame (which gains a
 * reference per slot), the rest carry over from ref_frame_map.  Every
 * carried-over reference is also pinned with an extra ref_count that
 * refresh_ref_frames() releases later.
 */
static void generate_next_ref_frames(struct VP9Decoder_s *pbi)
{
struct VP9_Common_s *const cm = &pbi->common;
struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
struct BufferPool_s *const pool = cm->buffer_pool;
int mask, ref_index = 0;
unsigned long flags;
/* Generate next_ref_frame_map.*/
lock_buffer_pool(pool, flags);
for (mask = pbi->refresh_frame_flags; mask; mask >>= 1) {
if (mask & 1) {
cm->next_ref_frame_map[ref_index] = cm->new_fb_idx;
++frame_bufs[cm->new_fb_idx].ref_count;
/*pr_info("[MMU DEBUG 4] inc ref_count[%d] : %d\r\n",
*cm->new_fb_idx, frame_bufs[cm->new_fb_idx].ref_count);
*/
} else
cm->next_ref_frame_map[ref_index] =
cm->ref_frame_map[ref_index];
/* Current thread holds the reference frame.*/
if (cm->ref_frame_map[ref_index] >= 0) {
++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
/*pr_info
*("[MMU DEBUG 5] inc ref_count[%d] : %d\r\n",
*cm->ref_frame_map[ref_index],
*frame_bufs[cm->ref_frame_map[ref_index]].ref_count);
*/
}
++ref_index;
}
/* slots beyond the highest refresh bit carry over unchanged */
for (; ref_index < REF_FRAMES; ++ref_index) {
cm->next_ref_frame_map[ref_index] =
cm->ref_frame_map[ref_index];
/* Current thread holds the reference frame.*/
if (cm->ref_frame_map[ref_index] >= 0) {
++frame_bufs[cm->ref_frame_map[ref_index]].ref_count;
/*pr_info("[MMU DEBUG 6] inc ref_count[%d] : %d\r\n",
*cm->ref_frame_map[ref_index],
*frame_bufs[cm->ref_frame_map[ref_index]].ref_count);
*/
}
}
unlock_buffer_pool(pool, flags);
return;
}
/*
 * Commit next_ref_frame_map into ref_frame_map, releasing the holds
 * taken in generate_next_ref_frames.  Flagged slots give up one extra
 * reference because the map entry itself is being replaced.
 */
static void refresh_ref_frames(struct VP9Decoder_s *pbi)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct BufferPool_s *pool = cm->buffer_pool;
	struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
	int mask = pbi->refresh_frame_flags;
	int slot = 0;
	unsigned long flags;

	lock_buffer_pool(pool, flags);

	/* Slots covered by refresh_frame_flags. */
	for (; mask; mask >>= 1, ++slot) {
		const int old = cm->ref_frame_map[slot];

		/* Current thread releases its hold on the old frame. */
		decrease_ref_count(old, frame_bufs, pool);
		/* Refreshed slot: the map's own reference goes away too. */
		if ((mask & 1) && old >= 0)
			decrease_ref_count(old, frame_bufs, pool);
		cm->ref_frame_map[slot] = cm->next_ref_frame_map[slot];
	}

	/*
	 * Remaining slots: only release this thread's hold.  Skipped
	 * entirely when showing an existing frame.
	 */
	for (; slot < REF_FRAMES && !cm->show_existing_frame; ++slot) {
		decrease_ref_count(cm->ref_frame_map[slot],
			frame_bufs, pool);
		cm->ref_frame_map[slot] = cm->next_ref_frame_map[slot];
	}

	unlock_buffer_pool(pool, flags);
}
/*
 * vp9_bufmgr_process() - buffer-manager work for one parsed frame header.
 *
 * Consumes the uncompressed-header fields the parser delivered in @params:
 * allocates a new frame buffer (and, without MV_USE_FIXED_BUF, an MV
 * buffer), handles show_existing_frame, configures frame sizes and the
 * per-frame reference slots, and pre-takes the next-reference-map holds
 * via generate_next_ref_frames() (balanced later by refresh_ref_frames()).
 *
 * Return: 0  - header accepted, proceed to decode this frame;
 *         1  - show_existing_frame: nothing to decode, search next start code;
 *         -1 - error (no free buffer, bad profile/index, size setup failed);
 *         -2 - data before the first keyframe was skipped.
 */
int vp9_bufmgr_process(struct VP9Decoder_s *pbi, union param_u *params)
{
	struct VP9_Common_s *const cm = &pbi->common;
	struct BufferPool_s *pool = cm->buffer_pool;
	struct RefCntBuffer_s *frame_bufs = cm->buffer_pool->frame_bufs;
	struct PIC_BUFFER_CONFIG_s *pic = NULL;
	int i;
	int ret;

	pbi->ready_for_new_data = 0;

	/* Nothing is decodable before the first keyframe/intra-only frame. */
	if ((pbi->has_keyframe == 0) &&
		(params->p.frame_type != KEY_FRAME) &&
		(!params->p.intra_only)) {
		on_no_keyframe_skiped++;
		pr_info("vp9_bufmgr_process no key frame return\n");
		return -2;
	}
	pbi->has_keyframe = 1;
	on_no_keyframe_skiped = 0;
#if 0
	if (pbi->mmu_enable) {
		if (!pbi->m_ins_flag)
			pbi->used_4k_num = (READ_VREG(HEVC_SAO_MMU_STATUS) >> 16);
		if (cm->prev_fb_idx >= 0) {
			decoder_mmu_box_free_idx_tail(pbi->mmu_box,
				cm->prev_fb_idx, pbi->used_4k_num);
		}
	}
#endif
	/* Previous candidate buffer never became referenced: recycle it. */
	if (cm->new_fb_idx >= 0
		&& frame_bufs[cm->new_fb_idx].ref_count == 0) {
		vp9_release_frame_buffer
			(&frame_bufs[cm->new_fb_idx].raw_frame_buffer);
	}
	/*pr_info("Before get_free_fb, prev_fb_idx : %d, new_fb_idx : %d\r\n",
	 *	cm->prev_fb_idx, cm->new_fb_idx);
	 */
#ifndef MV_USE_FIXED_BUF
	put_un_used_mv_bufs(pbi);
	if (debug & VP9_DEBUG_BUFMGR_DETAIL)
		dump_pic_list(pbi);
#endif
	/* Claim a free frame-buffer slot for the frame about to decode. */
	cm->new_fb_idx = pbi->is_used_v4l ?
		v4l_get_free_fb(pbi) :
		get_free_fb(pbi);
	if (cm->new_fb_idx == INVALID_IDX) {
		pr_info("get_free_fb error\r\n");
		return -1;
	}
#ifndef MV_USE_FIXED_BUF
#ifdef SUPPORT_FB_DECODING
	if (pbi->used_stage_buf_num == 0) {
#endif
		if (get_mv_buf(pbi,
			&pool->frame_bufs[cm->new_fb_idx].
			buf) < 0) {
			pr_info("get_mv_buf fail\r\n");
			return -1;
		}
		if (debug & VP9_DEBUG_BUFMGR_DETAIL)
			dump_pic_list(pbi);
#ifdef SUPPORT_FB_DECODING
	}
#endif
#endif
	cm->cur_frame = &pool->frame_bufs[cm->new_fb_idx];
	/*if (debug & VP9_DEBUG_BUFMGR)
	 *	pr_info("[VP9 DEBUG]%s(get_free_fb): %d\r\n", __func__,
	 *		cm->new_fb_idx);
	 */
	pbi->cur_buf = &frame_bufs[cm->new_fb_idx];
	if (pbi->mmu_enable) {
		/* moved to after picture size ready
		 *alloc_mmu(cm, params->p.width, params->p.height,
		 *params->p.bit_depth, pbi->frame_mmu_map_addr);
		 */
		cm->prev_fb_idx = cm->new_fb_idx;
	}
	/*read_uncompressed_header()*/
	cm->last_frame_type = cm->frame_type;
	cm->last_intra_only = cm->intra_only;
	cm->profile = params->p.profile;
	if (cm->profile >= MAX_PROFILES) {
		pr_err("Error: Unsupported profile %d\r\n", cm->profile);
		return -1;
	}
	cm->show_existing_frame = params->p.show_existing_frame;
	if (cm->show_existing_frame) {
		/* Show an existing frame directly.*/
		int frame_to_show_idx = params->p.frame_to_show_idx;
		int frame_to_show;
		unsigned long flags;

		if (frame_to_show_idx >= REF_FRAMES) {
			pr_info("frame_to_show_idx %d exceed max index\r\n",
				frame_to_show_idx);
			return -1;
		}

		frame_to_show = cm->ref_frame_map[frame_to_show_idx];
		/*pr_info("frame_to_show %d\r\n", frame_to_show);*/
		lock_buffer_pool(pool, flags);
		if (frame_to_show < 0 ||
			frame_bufs[frame_to_show].ref_count < 1) {
			unlock_buffer_pool(pool, flags);
			pr_err
			("Error:Buffer %d does not contain a decoded frame",
				frame_to_show);
			return -1;
		}

		/* Re-point new_fb_idx at the frame being re-shown. */
		ref_cnt_fb(frame_bufs, &cm->new_fb_idx, frame_to_show);
		unlock_buffer_pool(pool, flags);
		pbi->refresh_frame_flags = 0;
		/*cm->lf.filter_level = 0;*/
		cm->show_frame = 1;
		cm->cur_frame->show_frame = 1;

		/*
		 *if (pbi->frame_parallel_decode) {
		 *	for (i = 0; i < REF_FRAMES; ++i)
		 *		cm->next_ref_frame_map[i] =
		 *			cm->ref_frame_map[i];
		 *}
		 */
		/* do not decode, search next start code */
		return 1;
	}
	cm->frame_type = params->p.frame_type;
	cm->show_frame = params->p.show_frame;
	cm->bit_depth = params->p.bit_depth;
	cm->error_resilient_mode = params->p.error_resilient_mode;
	cm->cur_frame->show_frame = cm->show_frame;
	if (cm->frame_type == KEY_FRAME) {
		/* A keyframe refreshes every reference slot. */
		pbi->refresh_frame_flags = (1 << REF_FRAMES) - 1;

		for (i = 0; i < REFS_PER_FRAME; ++i) {
			cm->frame_refs[i].idx = INVALID_IDX;
			cm->frame_refs[i].buf = NULL;
		}
#ifdef VP9_10B_MMU_DW
		ret = setup_frame_size(pbi,
			cm, params,
			pbi->frame_mmu_map_addr,
			pbi->frame_mmu_dw_map_addr,
			print_header_info);
#else
		ret = setup_frame_size(pbi,
			cm, params,
			pbi->frame_mmu_map_addr,
			NULL,
			print_header_info);
#endif
		if (ret)
			return -1;
		if (pbi->need_resync) {
			memset(&cm->ref_frame_map, -1,
				sizeof(cm->ref_frame_map));
			pbi->need_resync = 0;
		}
	} else {
		cm->intra_only = cm->show_frame ? 0 : params->p.intra_only;
		/*if (print_header_info) {
		 *	if (cm->show_frame)
		 *		pr_info
		 *		("intra_only set to 0 because of show_frame\n");
		 *	else
		 *		pr_info
		 *		("1-bit intra_only read: %d\n", cm->intra_only);
		 *}
		 */
		cm->reset_frame_context = cm->error_resilient_mode ?
			0 : params->p.reset_frame_context;
		if (print_header_info) {
			if (cm->error_resilient_mode)
				pr_info
				("reset to 0 error_resilient_mode\n");
			else
				pr_info
				(" * 2-bits reset_frame_context read : %d\n",
					cm->reset_frame_context);
		}
		if (cm->intra_only) {
			if (cm->profile > PROFILE_0) {
				/*read_bitdepth_colorspace_sampling(cm,
				 *	rb, print_header_info);
				 */
			} else {
				/*NOTE: The intra-only frame header
				 *does not include the specification
				 *of either the color format or
				 *color sub-sampling
				 *in profile 0. VP9 specifies that the default
				 *color format should be YUV 4:2:0 in this
				 *case (normative).
				 */
				cm->color_space = VPX_CS_BT_601;
				cm->subsampling_y = cm->subsampling_x = 1;
				cm->bit_depth = VPX_BITS_8;
				cm->use_highbitdepth = 0;
			}

			pbi->refresh_frame_flags =
				params->p.refresh_frame_flags;
			/*if (print_header_info)
			 *	pr_info("*%d-bits refresh_frame read:0x%x\n",
			 *		REF_FRAMES, pbi->refresh_frame_flags);
			 */
#ifdef VP9_10B_MMU_DW
			ret = setup_frame_size(pbi,
				cm,
				params,
				pbi->frame_mmu_map_addr,
				pbi->frame_mmu_dw_map_addr,
				print_header_info);
#else
			ret = setup_frame_size(pbi,
				cm,
				params,
				pbi->frame_mmu_map_addr,
				NULL,
				print_header_info);
#endif
			if (ret)
				return -1;
			if (pbi->need_resync) {
				memset(&cm->ref_frame_map, -1,
					sizeof(cm->ref_frame_map));
				pbi->need_resync = 0;
			}
		} else if (pbi->need_resync != 1) { /* Skip if need resync */
			pbi->refresh_frame_flags =
				params->p.refresh_frame_flags;
			if (print_header_info)
				pr_info
				("*%d-bits refresh_frame read:0x%x\n",
					REF_FRAMES, pbi->refresh_frame_flags);
			/*
			 * Resolve the REFS_PER_FRAME active references.
			 * ref_info packs, per reference, a 3-bit slot
			 * number plus a 1-bit sign bias (see the shifts
			 * below).
			 */
			for (i = 0; i < REFS_PER_FRAME; ++i) {
				const int ref =
					(params->p.ref_info >>
					(((REFS_PER_FRAME-i-1)*4)+1))
					& 0x7;
				const int idx =
					cm->ref_frame_map[ref];
				struct RefBuffer_s * const ref_frame =
					&cm->frame_refs[i];
				if (print_header_info)
					pr_info("*%d-bits ref[%d]read:%d\n",
						REF_FRAMES_LOG2, i, ref);
				ref_frame->idx = idx;
				/* NOTE(review): idx can be -1 when the
				 * mapped slot is empty (e.g. after a
				 * resync memset); frame_bufs[-1] here
				 * looks out-of-bounds - confirm callers
				 * guard against INVALID_IDX refs.
				 */
				ref_frame->buf = &frame_bufs[idx].buf;
				cm->ref_frame_sign_bias[LAST_FRAME + i]
					= (params->p.ref_info >>
					((REFS_PER_FRAME-i-1)*4)) & 0x1;
				if (print_header_info)
					pr_info("1bit ref_frame_sign_bias");
				/*pr_info
				 *("%dread: %d\n",
				 *LAST_FRAME+i,
				 *cm->ref_frame_sign_bias
				 *[LAST_FRAME + i]);
				 */
				/*pr_info
				 *("[VP9 DEBUG]%s(get ref):%d\r\n",
				 *__func__, ref_frame->idx);
				 */
			}
#ifdef VP9_10B_MMU_DW
			ret = setup_frame_size_with_refs(
				pbi,
				cm,
				params,
				pbi->frame_mmu_map_addr,
				pbi->frame_mmu_dw_map_addr,
				print_header_info);
#else
			ret = setup_frame_size_with_refs(
				pbi,
				cm,
				params,
				pbi->frame_mmu_map_addr,
				NULL,
				print_header_info);
#endif
			if (ret)
				return -1;
			for (i = 0; i < REFS_PER_FRAME; ++i) {
				/*struct RefBuffer_s *const ref_buf =
				 *&cm->frame_refs[i];
				 */
				/* to do:
				 *vp9_setup_scale_factors_for_frame
				 */
			}
		}
	}

	pic = get_frame_new_buffer(cm);
	if (!pic)
		return -1;

	pic->bit_depth = cm->bit_depth;
	pic->color_space = cm->color_space;
	pic->slice_type = cm->frame_type;

	if (pbi->need_resync) {
		pr_err
		("Error: Keyframe/intra-only frame required to reset\r\n");
		return -1;
	}
	generate_next_ref_frames(pbi);
	pbi->hold_ref_buf = 1;
#if 0
	if (frame_is_intra_only(cm) || cm->error_resilient_mode)
		vp9_setup_past_independence(cm);
	setup_loopfilter(&cm->lf, rb, print_header_info);
	setup_quantization(cm, &pbi->mb, rb, print_header_info);
	setup_segmentation(&cm->seg, rb, print_header_info);
	setup_segmentation_dequant(cm, print_header_info);
	setup_tile_info(cm, rb, print_header_info);
	sz = vp9_rb_read_literal(rb, 16);
	if (print_header_info)
		pr_info(" * 16-bits size read : %d (0x%x)\n", sz, sz);
	if (sz == 0)
		vpx_internal_error(&cm->error, VPX_CODEC_CORRUPT_FRAME,
			"Invalid header size");
#endif
	/*end read_uncompressed_header()*/
	/* Previous-frame MVs are usable only if the last frame matched in
	 * size and was a shown, non-intra, non-key frame.
	 */
	cm->use_prev_frame_mvs = !cm->error_resilient_mode &&
		cm->width == cm->last_width &&
		cm->height == cm->last_height &&
		!cm->last_intra_only &&
		cm->last_show_frame &&
		(cm->last_frame_type != KEY_FRAME);
	/*pr_info
	 *("set use_prev_frame_mvs to %d (last_width %d last_height %d",
	 *cm->use_prev_frame_mvs, cm->last_width, cm->last_height);
	 *pr_info
	 *(" last_intra_only %d last_show_frame %d last_frame_type %d)\n",
	 *cm->last_intra_only, cm->last_show_frame, cm->last_frame_type);
	 */

	if (pbi->enable_fence && cm->show_frame) {
		/* This 'pic' intentionally shadows the outer one. */
		struct PIC_BUFFER_CONFIG_s *pic = &cm->cur_frame->buf;
		struct vdec_s *vdec = hw_to_vdec(pbi);

		/* create fence for each buffers. */
		ret = vdec_timeline_create_fence(vdec->sync);
		if (ret < 0)
			return ret;

		pic->fence		= vdec->sync->fence;
		pic->bit_depth		= cm->bit_depth;
		pic->slice_type		= cm->frame_type;
		pic->stream_offset	= pbi->pre_stream_offset;

		if (pbi->chunk) {
			pic->pts	= pbi->chunk->pts;
			pic->pts64	= pbi->chunk->pts64;
			pic->timestamp	= pbi->chunk->timestamp;
		}

		/* post video vframe. */
		prepare_display_buf(pbi, pic);
	}

	return 0;
}
void swap_frame_buffers(struct VP9Decoder_s *pbi)
{
int ref_index = 0;
struct VP9_Common_s *const cm = &pbi->common;
struct BufferPool_s *const pool = cm->buffer_pool;
struct RefCntBuffer_s *const frame_bufs = cm->buffer_pool->frame_bufs;
unsigned long flags;
refresh_ref_frames(pbi);
pbi->hold_ref_buf = 0;
cm->frame_to_show = get_frame_new_buffer(cm);
if (cm->frame_to_show) {
/*if (!pbi->frame_parallel_decode || !cm->show_frame) {*/
lock_buffer_pool(pool, flags);
--frame_bufs[cm->new_fb_idx].ref_count;
/*pr_info("[MMU DEBUG 8] dec ref_count[%d] : %d\r\n", cm->new_fb_idx,
* frame_bufs[cm->new_fb_idx].ref_count);
*/
unlock_buffer_pool(pool, flags);
/*}*/
}